Diffstat (limited to 'lib/sqlalchemy/orm')
-rw-r--r--  lib/sqlalchemy/orm/__init__.py           344
-rw-r--r--  lib/sqlalchemy/orm/attributes.py        2331
-rw-r--r--  lib/sqlalchemy/orm/base.py               572
-rw-r--r--  lib/sqlalchemy/orm/clsregistry.py        441
-rw-r--r--  lib/sqlalchemy/orm/collections.py       1706
-rw-r--r--  lib/sqlalchemy/orm/context.py           3136
-rw-r--r--  lib/sqlalchemy/orm/decl_api.py          1062
-rw-r--r--  lib/sqlalchemy/orm/decl_base.py         1210
-rw-r--r--  lib/sqlalchemy/orm/dependency.py        1290
-rw-r--r--  lib/sqlalchemy/orm/descriptor_props.py   745
-rw-r--r--  lib/sqlalchemy/orm/dynamic.py            491
-rw-r--r--  lib/sqlalchemy/orm/evaluator.py          241
-rw-r--r--  lib/sqlalchemy/orm/events.py            2876
-rw-r--r--  lib/sqlalchemy/orm/exc.py                204
-rw-r--r--  lib/sqlalchemy/orm/identity.py           254
-rw-r--r--  lib/sqlalchemy/orm/instrumentation.py    652
-rw-r--r--  lib/sqlalchemy/orm/interfaces.py         978
-rw-r--r--  lib/sqlalchemy/orm/loading.py           1465
-rw-r--r--  lib/sqlalchemy/orm/mapper.py            3658
-rw-r--r--  lib/sqlalchemy/orm/path_registry.py      519
-rw-r--r--  lib/sqlalchemy/orm/persistence.py       2517
-rw-r--r--  lib/sqlalchemy/orm/properties.py         430
-rw-r--r--  lib/sqlalchemy/orm/query.py             3508
-rw-r--r--  lib/sqlalchemy/orm/relationships.py     3684
-rw-r--r--  lib/sqlalchemy/orm/scoping.py            228
-rw-r--r--  lib/sqlalchemy/orm/session.py           4386
-rw-r--r--  lib/sqlalchemy/orm/state.py             1025
-rw-r--r--  lib/sqlalchemy/orm/strategies.py        3141
-rw-r--r--  lib/sqlalchemy/orm/strategy_options.py  2008
-rw-r--r--  lib/sqlalchemy/orm/sync.py               167
-rw-r--r--  lib/sqlalchemy/orm/unitofwork.py         784
-rw-r--r--  lib/sqlalchemy/orm/util.py              2149
32 files changed, 48202 insertions, 0 deletions
diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py
new file mode 100644
index 0000000..6e0de05
--- /dev/null
+++ b/lib/sqlalchemy/orm/__init__.py
@@ -0,0 +1,344 @@
+# orm/__init__.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""
+Functional constructs for ORM configuration.
+
+See the SQLAlchemy object relational tutorial and mapper configuration
+documentation for an overview of how this module is used.
+
+"""
+
+from . import exc
+from . import mapper as mapperlib
+from . import strategy_options
+from .attributes import AttributeEvent
+from .attributes import InstrumentedAttribute
+from .attributes import Mapped
+from .attributes import QueryableAttribute
+from .context import QueryContext
+from .decl_api import as_declarative
+from .decl_api import declarative_base
+from .decl_api import declarative_mixin
+from .decl_api import DeclarativeMeta
+from .decl_api import declared_attr
+from .decl_api import has_inherited_table
+from .decl_api import registry
+from .decl_api import synonym_for
+from .descriptor_props import CompositeProperty
+from .descriptor_props import SynonymProperty
+from .identity import IdentityMap
+from .instrumentation import ClassManager
+from .interfaces import EXT_CONTINUE
+from .interfaces import EXT_SKIP
+from .interfaces import EXT_STOP
+from .interfaces import InspectionAttr
+from .interfaces import InspectionAttrInfo
+from .interfaces import MANYTOMANY
+from .interfaces import MANYTOONE
+from .interfaces import MapperProperty
+from .interfaces import NOT_EXTENSION
+from .interfaces import ONETOMANY
+from .interfaces import PropComparator
+from .interfaces import UserDefinedOption
+from .loading import merge_frozen_result
+from .loading import merge_result
+from .mapper import class_mapper
+from .mapper import configure_mappers
+from .mapper import Mapper
+from .mapper import reconstructor
+from .mapper import validates
+from .properties import ColumnProperty
+from .query import AliasOption
+from .query import FromStatement
+from .query import Query
+from .relationships import foreign
+from .relationships import RelationshipProperty
+from .relationships import remote
+from .scoping import scoped_session
+from .session import close_all_sessions
+from .session import make_transient
+from .session import make_transient_to_detached
+from .session import object_session
+from .session import ORMExecuteState
+from .session import Session
+from .session import sessionmaker
+from .session import SessionTransaction
+from .state import AttributeState
+from .state import InstanceState
+from .strategy_options import Load
+from .unitofwork import UOWTransaction
+from .util import aliased
+from .util import Bundle
+from .util import CascadeOptions
+from .util import join
+from .util import LoaderCriteriaOption
+from .util import object_mapper
+from .util import outerjoin
+from .util import polymorphic_union
+from .util import was_deleted
+from .util import with_parent
+from .util import with_polymorphic
+from .. import sql as _sql
+from .. import util as _sa_util
+from ..util.langhelpers import public_factory
+
+
+def create_session(bind=None, **kwargs):
+ r"""Create a new :class:`.Session`
+ with no automation enabled by default.
+
+ This function is used primarily for testing. The usual
+ route to :class:`.Session` creation is via its constructor
+ or the :func:`.sessionmaker` function.
+
+ :param bind: optional, a single Connectable to use for all
+ database access in the created
+ :class:`~sqlalchemy.orm.session.Session`.
+
+ :param \*\*kwargs: optional, passed through to the
+ :class:`.Session` constructor.
+
+ :returns: an :class:`~sqlalchemy.orm.session.Session` instance
+
+    The defaults of create_session() are the opposite of those of
+    :func:`sessionmaker`: ``autoflush`` and ``expire_on_commit`` are
+    False and ``autocommit`` is True. In this sense the session acts
+    more like the "classic" SQLAlchemy 0.3 session.
+
+ .. deprecated:: 1.4 The "autocommit" parameter will be removed in
+ SQLAlchemy 2.0. :func:`_orm.create_session` will return a
+       :class:`_orm.Session` that does not include "autocommit" behavior
+ in release 2.0.
+
+ Usage::
+
+ >>> from sqlalchemy.orm import create_session
+ >>> session = create_session()
+
+ It is recommended to use :func:`sessionmaker` instead of
+ create_session().
+
+ """
+
+ if kwargs.get("future", False):
+ kwargs.setdefault("autocommit", False)
+ else:
+ kwargs.setdefault("autocommit", True)
+
+ kwargs.setdefault("autoflush", False)
+ kwargs.setdefault("expire_on_commit", False)
+ return Session(bind=bind, **kwargs)
+
+
+with_loader_criteria = public_factory(LoaderCriteriaOption, ".orm")
+
+relationship = public_factory(RelationshipProperty, ".orm.relationship")
+
+
+@_sa_util.deprecated_20("relation", "Please use :func:`.relationship`.")
+def relation(*arg, **kw):
+ """A synonym for :func:`relationship`."""
+
+ return relationship(*arg, **kw)
+
+
+def dynamic_loader(argument, **kw):
+ """Construct a dynamically-loading mapper property.
+
+ This is essentially the same as
+ using the ``lazy='dynamic'`` argument with :func:`relationship`::
+
+ dynamic_loader(SomeClass)
+
+ # is the same as
+
+ relationship(SomeClass, lazy="dynamic")
+
+ See the section :ref:`dynamic_relationship` for more details
+ on dynamic loading.
+
+ """
+ kw["lazy"] = "dynamic"
+ return relationship(argument, **kw)
+
+
+column_property = public_factory(ColumnProperty, ".orm.column_property")
+composite = public_factory(CompositeProperty, ".orm.composite")
+
+
+def backref(name, **kwargs):
+ """When using the :paramref:`_orm.relationship.backref` parameter,
+ provides specific parameters to be used when the new
+ :func:`_orm.relationship` is generated.
+
+ E.g.::
+
+        'items': relationship(
+            SomeItem, backref=backref('parent', lazy='subquery'))
+
+ The :paramref:`_orm.relationship.backref` parameter is generally
+ considered to be legacy; for modern applications, using
+ explicit :func:`_orm.relationship` constructs linked together using
+ the :paramref:`_orm.relationship.back_populates` parameter should be
+ preferred.
+
+ .. seealso::
+
+ :ref:`relationships_backref` - background on backrefs
+
+ """
+
+ return (name, kwargs)
+
+
+def deferred(*columns, **kw):
+ r"""Indicate a column-based mapped attribute that by default will
+ not load unless accessed.
+
+ :param \*columns: columns to be mapped. This is typically a single
+ :class:`_schema.Column` object,
+ however a collection is supported in order
+ to support multiple columns mapped under the same attribute.
+
+ :param raiseload: boolean, if True, indicates an exception should be raised
+ if the load operation is to take place.
+
+ .. versionadded:: 1.4
+
+ .. seealso::
+
+ :ref:`deferred_raiseload`
+
+ :param \**kw: additional keyword arguments passed to
+ :class:`.ColumnProperty`.
+
+ .. seealso::
+
+ :ref:`deferred`
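+
+    E.g., a minimal declarative sketch (the ``Book`` mapping and the
+    declarative ``Base`` here are hypothetical)::
+
+        from sqlalchemy import Column, Integer, Text
+
+        class Book(Base):
+            __tablename__ = "book"
+
+            id = Column(Integer, primary_key=True)
+            summary = deferred(Column(Text))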
+
+ """
+ return ColumnProperty(deferred=True, *columns, **kw)
+
+
+def query_expression(default_expr=_sql.null()):
+ """Indicate an attribute that populates from a query-time SQL expression.
+
+ :param default_expr: Optional SQL expression object that will be used in
+ all cases if not assigned later with :func:`_orm.with_expression`.
+ E.g.::
+
+ from sqlalchemy.sql import literal
+
+ class C(Base):
+ #...
+ my_expr = query_expression(literal(1))
+
+ .. versionadded:: 1.3.18
+
+
+ .. versionadded:: 1.2
+
+ .. seealso::
+
+ :ref:`mapper_querytime_expression`
+
+ """
+ prop = ColumnProperty(default_expr)
+ prop.strategy_key = (("query_expression", True),)
+ return prop
+
+
+mapper = public_factory(Mapper, ".orm.mapper")
+
+synonym = public_factory(SynonymProperty, ".orm.synonym")
+
+
+def clear_mappers():
+ """Remove all mappers from all classes.
+
+ .. versionchanged:: 1.4 This function now locates all
+ :class:`_orm.registry` objects and calls upon the
+ :meth:`_orm.registry.dispose` method of each.
+
+ This function removes all instrumentation from classes and disposes
+ of their associated mappers. Once called, the classes are unmapped
+ and can be later re-mapped with new mappers.
+
+ :func:`.clear_mappers` is *not* for normal use, as there is literally no
+ valid usage for it outside of very specific testing scenarios. Normally,
+ mappers are permanent structural components of user-defined classes, and
+ are never discarded independently of their class. If a mapped class
+ itself is garbage collected, its mapper is automatically disposed of as
+ well. As such, :func:`.clear_mappers` is only for usage in test suites
+ that re-use the same classes with different mappings, which is itself an
+ extremely rare use case - the only such use case is in fact SQLAlchemy's
+ own test suite, and possibly the test suites of other ORM extension
+ libraries which intend to test various combinations of mapper construction
+ upon a fixed set of classes.
+
+ """
+
+ mapperlib._dispose_registries(mapperlib._all_registries(), False)
+
+
+joinedload = strategy_options.joinedload._unbound_fn
+contains_eager = strategy_options.contains_eager._unbound_fn
+defer = strategy_options.defer._unbound_fn
+undefer = strategy_options.undefer._unbound_fn
+undefer_group = strategy_options.undefer_group._unbound_fn
+with_expression = strategy_options.with_expression._unbound_fn
+load_only = strategy_options.load_only._unbound_fn
+lazyload = strategy_options.lazyload._unbound_fn
+subqueryload = strategy_options.subqueryload._unbound_fn
+selectinload = strategy_options.selectinload._unbound_fn
+immediateload = strategy_options.immediateload._unbound_fn
+noload = strategy_options.noload._unbound_fn
+raiseload = strategy_options.raiseload._unbound_fn
+defaultload = strategy_options.defaultload._unbound_fn
+selectin_polymorphic = strategy_options.selectin_polymorphic._unbound_fn
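+
+# a usage sketch for the loader options above; ``User`` and its
+# ``User.addresses`` relationship are hypothetical mapped names:
+#
+#     session.query(User).options(joinedload(User.addresses))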
+
+
+@_sa_util.deprecated_20("eagerload", "Please use :func:`_orm.joinedload`.")
+def eagerload(*args, **kwargs):
+ """A synonym for :func:`joinedload()`."""
+ return joinedload(*args, **kwargs)
+
+
+contains_alias = public_factory(AliasOption, ".orm.contains_alias")
+
+if True:
+ from .events import AttributeEvents
+ from .events import MapperEvents
+ from .events import InstanceEvents
+ from .events import InstrumentationEvents
+ from .events import QueryEvents
+ from .events import SessionEvents
+
+
+def __go(lcls):
+ global __all__
+ global AppenderQuery
+ from .. import util as sa_util
+ from . import dynamic
+ from . import events
+ from . import loading
+ import inspect as _inspect
+
+ from .dynamic import AppenderQuery
+
+ __all__ = sorted(
+ name
+ for name, obj in lcls.items()
+ if not (name.startswith("_") or _inspect.ismodule(obj))
+ )
+
+ _sa_util.preloaded.import_prefix("sqlalchemy.orm")
+ _sa_util.preloaded.import_prefix("sqlalchemy.ext")
+
+
+__go(locals())
diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py
new file mode 100644
index 0000000..efa20fb
--- /dev/null
+++ b/lib/sqlalchemy/orm/attributes.py
@@ -0,0 +1,2331 @@
+# orm/attributes.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""Defines instrumentation for class attributes and their interaction
+with instances.
+
+This module is usually not directly visible to user applications, but
+defines a large part of the ORM's interactivity.
+
+
+"""
+
+import operator
+
+from . import collections
+from . import exc as orm_exc
+from . import interfaces
+from .base import ATTR_EMPTY
+from .base import ATTR_WAS_SET
+from .base import CALLABLES_OK
+from .base import DEFERRED_HISTORY_LOAD
+from .base import INIT_OK
+from .base import instance_dict
+from .base import instance_state
+from .base import instance_str
+from .base import LOAD_AGAINST_COMMITTED
+from .base import manager_of_class
+from .base import NEVER_SET # noqa
+from .base import NO_AUTOFLUSH
+from .base import NO_CHANGE # noqa
+from .base import NO_RAISE
+from .base import NO_VALUE
+from .base import NON_PERSISTENT_OK # noqa
+from .base import PASSIVE_CLASS_MISMATCH # noqa
+from .base import PASSIVE_NO_FETCH
+from .base import PASSIVE_NO_FETCH_RELATED # noqa
+from .base import PASSIVE_NO_INITIALIZE
+from .base import PASSIVE_NO_RESULT
+from .base import PASSIVE_OFF
+from .base import PASSIVE_ONLY_PERSISTENT
+from .base import PASSIVE_RETURN_NO_VALUE
+from .base import RELATED_OBJECT_OK # noqa
+from .base import SQL_OK # noqa
+from .base import state_str
+from .. import event
+from .. import exc
+from .. import inspection
+from .. import util
+from ..sql import base as sql_base
+from ..sql import roles
+from ..sql import traversals
+from ..sql import visitors
+
+
+class NoKey(str):
+ pass
+
+
+NO_KEY = NoKey("no name")
+
+
+@inspection._self_inspects
+class QueryableAttribute(
+ interfaces._MappedAttribute,
+ interfaces.InspectionAttr,
+ interfaces.PropComparator,
+ traversals.HasCopyInternals,
+ roles.JoinTargetRole,
+ roles.OnClauseRole,
+ sql_base.Immutable,
+ sql_base.MemoizedHasCacheKey,
+):
+ """Base class for :term:`descriptor` objects that intercept
+ attribute events on behalf of a :class:`.MapperProperty`
+ object. The actual :class:`.MapperProperty` is accessible
+ via the :attr:`.QueryableAttribute.property`
+ attribute.
+
+
+ .. seealso::
+
+ :class:`.InstrumentedAttribute`
+
+ :class:`.MapperProperty`
+
+ :attr:`_orm.Mapper.all_orm_descriptors`
+
+ :attr:`_orm.Mapper.attrs`
+ """
+
+ is_attribute = True
+
+ # PropComparator has a __visit_name__ to participate within
+ # traversals. Disambiguate the attribute vs. a comparator.
+ __visit_name__ = "orm_instrumented_attribute"
+
+ def __init__(
+ self,
+ class_,
+ key,
+ parententity,
+ impl=None,
+ comparator=None,
+ of_type=None,
+ extra_criteria=(),
+ ):
+ self.class_ = class_
+ self.key = key
+ self._parententity = parententity
+ self.impl = impl
+ self.comparator = comparator
+ self._of_type = of_type
+ self._extra_criteria = extra_criteria
+
+ manager = manager_of_class(class_)
+ # manager is None in the case of AliasedClass
+ if manager:
+ # propagate existing event listeners from
+ # immediate superclass
+ for base in manager._bases:
+ if key in base:
+ self.dispatch._update(base[key].dispatch)
+ if base[key].dispatch._active_history:
+ self.dispatch._active_history = True
+
+ _cache_key_traversal = [
+ ("key", visitors.ExtendedInternalTraversal.dp_string),
+ ("_parententity", visitors.ExtendedInternalTraversal.dp_multi),
+ ("_of_type", visitors.ExtendedInternalTraversal.dp_multi),
+ ("_extra_criteria", visitors.InternalTraversal.dp_clauseelement_list),
+ ]
+
+ def __reduce__(self):
+        # this method is only used by the
+        # sqlalchemy.ext.serializer extension
+ return (
+ _queryable_attribute_unreduce,
+ (
+ self.key,
+ self._parententity.mapper.class_,
+ self._parententity,
+ self._parententity.entity,
+ ),
+ )
+
+ @util.memoized_property
+ def _supports_population(self):
+ return self.impl.supports_population
+
+ @property
+ def _impl_uses_objects(self):
+ return self.impl.uses_objects
+
+ def get_history(self, instance, passive=PASSIVE_OFF):
+ return self.impl.get_history(
+ instance_state(instance), instance_dict(instance), passive
+ )
+
+ @util.memoized_property
+ def info(self):
+ """Return the 'info' dictionary for the underlying SQL element.
+
+ The behavior here is as follows:
+
+ * If the attribute is a column-mapped property, i.e.
+ :class:`.ColumnProperty`, which is mapped directly
+ to a schema-level :class:`_schema.Column` object, this attribute
+ will return the :attr:`.SchemaItem.info` dictionary associated
+ with the core-level :class:`_schema.Column` object.
+
+ * If the attribute is a :class:`.ColumnProperty` but is mapped to
+ any other kind of SQL expression other than a
+ :class:`_schema.Column`,
+ the attribute will refer to the :attr:`.MapperProperty.info`
+ dictionary associated directly with the :class:`.ColumnProperty`,
+ assuming the SQL expression itself does not have its own ``.info``
+ attribute (which should be the case, unless a user-defined SQL
+ construct has defined one).
+
+ * If the attribute refers to any other kind of
+ :class:`.MapperProperty`, including :class:`.RelationshipProperty`,
+ the attribute will refer to the :attr:`.MapperProperty.info`
+ dictionary associated with that :class:`.MapperProperty`.
+
+ * To access the :attr:`.MapperProperty.info` dictionary of the
+ :class:`.MapperProperty` unconditionally, including for a
+ :class:`.ColumnProperty` that's associated directly with a
+ :class:`_schema.Column`, the attribute can be referred to using
+ :attr:`.QueryableAttribute.property` attribute, as
+ ``MyClass.someattribute.property.info``.
+
+ .. seealso::
+
+ :attr:`.SchemaItem.info`
+
+ :attr:`.MapperProperty.info`
+
+ """
+ return self.comparator.info
+
+ @util.memoized_property
+ def parent(self):
+ """Return an inspection instance representing the parent.
+
+ This will be either an instance of :class:`_orm.Mapper`
+ or :class:`.AliasedInsp`, depending upon the nature
+ of the parent entity which this attribute is associated
+ with.
+
+ """
+ return inspection.inspect(self._parententity)
+
+ @util.memoized_property
+ def expression(self):
+ """The SQL expression object represented by this
+ :class:`.QueryableAttribute`.
+
+ This will typically be an instance of a :class:`_sql.ColumnElement`
+ subclass representing a column expression.
+
+ """
+ if self.key is NO_KEY:
+ annotations = {"entity_namespace": self._entity_namespace}
+ else:
+ annotations = {
+ "proxy_key": self.key,
+ "proxy_owner": self._parententity,
+ "entity_namespace": self._entity_namespace,
+ }
+
+ ce = self.comparator.__clause_element__()
+ try:
+ anno = ce._annotate
+ except AttributeError as ae:
+ util.raise_(
+ exc.InvalidRequestError(
+ 'When interpreting attribute "%s" as a SQL expression, '
+ "expected __clause_element__() to return "
+ "a ClauseElement object, got: %r" % (self, ce)
+ ),
+ from_=ae,
+ )
+ else:
+ return anno(annotations)
+
+ @property
+ def _entity_namespace(self):
+ return self._parententity
+
+ @property
+ def _annotations(self):
+ return self.__clause_element__()._annotations
+
+ def __clause_element__(self):
+ return self.expression
+
+ @property
+ def _from_objects(self):
+ return self.expression._from_objects
+
+ def _bulk_update_tuples(self, value):
+ """Return setter tuples for a bulk UPDATE."""
+
+ return self.comparator._bulk_update_tuples(value)
+
+ def adapt_to_entity(self, adapt_to_entity):
+ assert not self._of_type
+ return self.__class__(
+ adapt_to_entity.entity,
+ self.key,
+ impl=self.impl,
+ comparator=self.comparator.adapt_to_entity(adapt_to_entity),
+ parententity=adapt_to_entity,
+ )
+
+ def of_type(self, entity):
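+        # usage sketch (hypothetical mapping): ``Company.employees.of_type(
+        # Engineer)`` narrows the relationship target to a subtype.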
+ return QueryableAttribute(
+ self.class_,
+ self.key,
+ self._parententity,
+ impl=self.impl,
+ comparator=self.comparator.of_type(entity),
+ of_type=inspection.inspect(entity),
+ extra_criteria=self._extra_criteria,
+ )
+
+ def and_(self, *other):
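+        # usage sketch (hypothetical mapping): ``User.addresses.and_(
+        # Address.active == True)`` returns a copy of this attribute
+        # with the additional criteria attached.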
+ return QueryableAttribute(
+ self.class_,
+ self.key,
+ self._parententity,
+ impl=self.impl,
+ comparator=self.comparator.and_(*other),
+ of_type=self._of_type,
+ extra_criteria=self._extra_criteria + other,
+ )
+
+ def _clone(self, **kw):
+ return QueryableAttribute(
+ self.class_,
+ self.key,
+ self._parententity,
+ impl=self.impl,
+ comparator=self.comparator,
+ of_type=self._of_type,
+ extra_criteria=self._extra_criteria,
+ )
+
+ def label(self, name):
+ return self.__clause_element__().label(name)
+
+ def operate(self, op, *other, **kwargs):
+ return op(self.comparator, *other, **kwargs)
+
+ def reverse_operate(self, op, other, **kwargs):
+ return op(other, self.comparator, **kwargs)
+
+ def hasparent(self, state, optimistic=False):
+ return self.impl.hasparent(state, optimistic=optimistic) is not False
+
+ def __getattr__(self, key):
+ try:
+ return getattr(self.comparator, key)
+ except AttributeError as err:
+ util.raise_(
+ AttributeError(
+ "Neither %r object nor %r object associated with %s "
+ "has an attribute %r"
+ % (
+ type(self).__name__,
+ type(self.comparator).__name__,
+ self,
+ key,
+ )
+ ),
+ replace_context=err,
+ )
+
+ def __str__(self):
+ return "%s.%s" % (self.class_.__name__, self.key)
+
+ @util.memoized_property
+ def property(self):
+ """Return the :class:`.MapperProperty` associated with this
+ :class:`.QueryableAttribute`.
+
+
+ Return values here will commonly be instances of
+ :class:`.ColumnProperty` or :class:`.RelationshipProperty`.
+
+
+ """
+ return self.comparator.property
+
+
+def _queryable_attribute_unreduce(key, mapped_class, parententity, entity):
+    # this function is only used by the
+    # sqlalchemy.ext.serializer extension
+ if parententity.is_aliased_class:
+ return entity._get_from_serialized(key, mapped_class, parententity)
+ else:
+ return getattr(entity, key)
+
+
+if util.py3k:
+ from typing import TypeVar, Generic
+
+ _T = TypeVar("_T")
+ _Generic_T = Generic[_T]
+else:
+ _Generic_T = type("_Generic_T", (), {})
+
+
+class Mapped(QueryableAttribute, _Generic_T):
+ """Represent an ORM mapped :term:`descriptor` attribute for typing
+ purposes.
+
+ This class represents the complete descriptor interface for any class
+ attribute that will have been :term:`instrumented` by the ORM
+ :class:`_orm.Mapper` class. When used with typing stubs, it is the final
+ type that would be used by a type checker such as mypy to provide the full
+ behavioral contract for the attribute.
+
+ .. tip::
+
+ The :class:`_orm.Mapped` class represents attributes that are handled
+ directly by the :class:`_orm.Mapper` class. It does not include other
+ Python descriptor classes that are provided as extensions, including
+ :ref:`hybrids_toplevel` and the :ref:`associationproxy_toplevel`.
+ While these systems still make use of ORM-specific superclasses
+ and structures, they are not :term:`instrumented` by the
+ :class:`_orm.Mapper` and instead provide their own functionality
+ when they are accessed on a class.
+
+ When using the :ref:`SQLAlchemy Mypy plugin <mypy_toplevel>`, the
+ :class:`_orm.Mapped` construct is used in typing annotations to indicate to
+ the plugin those attributes that are expected to be mapped; the plugin also
+ applies :class:`_orm.Mapped` as an annotation automatically when it scans
+ through declarative mappings in :ref:`orm_declarative_table` style. For
+ more indirect mapping styles such as
+ :ref:`imperative table <orm_imperative_table_configuration>` it is
+ typically applied explicitly to class level attributes that expect
+ to be mapped based on a given :class:`_schema.Table` configuration.
+
+ :class:`_orm.Mapped` is defined in the
+ `sqlalchemy2-stubs <https://pypi.org/project/sqlalchemy2-stubs>`_ project
+ as a :pep:`484` generic class which may subscribe to any arbitrary Python
+ type, which represents the Python type handled by the attribute::
+
+ class MyMappedClass(Base):
+            __table__ = Table(
+ "some_table", Base.metadata,
+ Column("id", Integer, primary_key=True),
+ Column("data", String(50)),
+ Column("created_at", DateTime)
+ )
+
+            id: Mapped[int]
+ data: Mapped[str]
+ created_at: Mapped[datetime]
+
+ For complete background on how to use :class:`_orm.Mapped` with
+ pep-484 tools like Mypy, see the link below for background on SQLAlchemy's
+ Mypy plugin.
+
+ .. versionadded:: 1.4
+
+ .. seealso::
+
+ :ref:`mypy_toplevel` - complete background on Mypy integration
+
+ """
+
+ def __get__(self, instance, owner):
+ raise NotImplementedError()
+
+ def __set__(self, instance, value):
+ raise NotImplementedError()
+
+ def __delete__(self, instance):
+ raise NotImplementedError()
+
+
+class InstrumentedAttribute(Mapped):
+ """Class bound instrumented attribute which adds basic
+ :term:`descriptor` methods.
+
+ See :class:`.QueryableAttribute` for a description of most features.
+
+
+ """
+
+ inherit_cache = True
+
+ def __set__(self, instance, value):
+ self.impl.set(
+ instance_state(instance), instance_dict(instance), value, None
+ )
+
+ def __delete__(self, instance):
+ self.impl.delete(instance_state(instance), instance_dict(instance))
+
+ def __get__(self, instance, owner):
+ if instance is None:
+ return self
+
+ dict_ = instance_dict(instance)
+ if self._supports_population and self.key in dict_:
+ return dict_[self.key]
+ else:
+ try:
+ state = instance_state(instance)
+ except AttributeError as err:
+ util.raise_(
+ orm_exc.UnmappedInstanceError(instance),
+ replace_context=err,
+ )
+ return self.impl.get(state, dict_)
+
+
+HasEntityNamespace = util.namedtuple(
+ "HasEntityNamespace", ["entity_namespace"]
+)
+HasEntityNamespace.is_mapper = HasEntityNamespace.is_aliased_class = False
+
+
+def create_proxied_attribute(descriptor):
+    """Create a QueryableAttribute / user descriptor hybrid.
+
+ Returns a new QueryableAttribute type that delegates descriptor
+ behavior and getattr() to the given descriptor.
+ """
+
+ # TODO: can move this to descriptor_props if the need for this
+ # function is removed from ext/hybrid.py
+
+ class Proxy(QueryableAttribute):
+ """Presents the :class:`.QueryableAttribute` interface as a
+ proxy on top of a Python descriptor / :class:`.PropComparator`
+ combination.
+
+ """
+
+ _extra_criteria = ()
+
+ def __init__(
+ self,
+ class_,
+ key,
+ descriptor,
+ comparator,
+ adapt_to_entity=None,
+ doc=None,
+ original_property=None,
+ ):
+ self.class_ = class_
+ self.key = key
+ self.descriptor = descriptor
+ self.original_property = original_property
+ self._comparator = comparator
+ self._adapt_to_entity = adapt_to_entity
+ self.__doc__ = doc
+
+ _is_internal_proxy = True
+
+ _cache_key_traversal = [
+ ("key", visitors.ExtendedInternalTraversal.dp_string),
+ ("_parententity", visitors.ExtendedInternalTraversal.dp_multi),
+ ]
+
+ @property
+ def _impl_uses_objects(self):
+ return (
+ self.original_property is not None
+ and getattr(self.class_, self.key).impl.uses_objects
+ )
+
+ @property
+ def _parententity(self):
+ return inspection.inspect(self.class_, raiseerr=False)
+
+ @property
+ def _entity_namespace(self):
+ if hasattr(self._comparator, "_parententity"):
+ return self._comparator._parententity
+ else:
+ # used by hybrid attributes which try to remain
+ # agnostic of any ORM concepts like mappers
+ return HasEntityNamespace(self.class_)
+
+ @property
+ def property(self):
+ return self.comparator.property
+
+ @util.memoized_property
+ def comparator(self):
+ if callable(self._comparator):
+ self._comparator = self._comparator()
+ if self._adapt_to_entity:
+ self._comparator = self._comparator.adapt_to_entity(
+ self._adapt_to_entity
+ )
+ return self._comparator
+
+ def adapt_to_entity(self, adapt_to_entity):
+ return self.__class__(
+ adapt_to_entity.entity,
+ self.key,
+ self.descriptor,
+ self._comparator,
+ adapt_to_entity,
+ )
+
+ def _clone(self, **kw):
+ return self.__class__(
+ self.class_,
+ self.key,
+ self.descriptor,
+ self._comparator,
+ adapt_to_entity=self._adapt_to_entity,
+ original_property=self.original_property,
+ )
+
+ def __get__(self, instance, owner):
+ retval = self.descriptor.__get__(instance, owner)
+ # detect if this is a plain Python @property, which just returns
+ # itself for class level access. If so, then return us.
+ # Otherwise, return the object returned by the descriptor.
+ if retval is self.descriptor and instance is None:
+ return self
+ else:
+ return retval
+
+ def __str__(self):
+ return "%s.%s" % (self.class_.__name__, self.key)
+
+ def __getattr__(self, attribute):
+ """Delegate __getattr__ to the original descriptor and/or
+ comparator."""
+ try:
+ return getattr(descriptor, attribute)
+ except AttributeError as err:
+ if attribute == "comparator":
+ util.raise_(
+ AttributeError("comparator"), replace_context=err
+ )
+ try:
+ # comparator itself might be unreachable
+ comparator = self.comparator
+ except AttributeError as err2:
+ util.raise_(
+ AttributeError(
+ "Neither %r object nor unconfigured comparator "
+ "object associated with %s has an attribute %r"
+ % (type(descriptor).__name__, self, attribute)
+ ),
+ replace_context=err2,
+ )
+ else:
+ try:
+ return getattr(comparator, attribute)
+ except AttributeError as err3:
+ util.raise_(
+ AttributeError(
+ "Neither %r object nor %r object "
+ "associated with %s has an attribute %r"
+ % (
+ type(descriptor).__name__,
+ type(comparator).__name__,
+ self,
+ attribute,
+ )
+ ),
+ replace_context=err3,
+ )
+
+ Proxy.__name__ = type(descriptor).__name__ + "Proxy"
+
+ util.monkeypatch_proxied_specials(
+ Proxy, type(descriptor), name="descriptor", from_instance=descriptor
+ )
+ return Proxy
+
+
+OP_REMOVE = util.symbol("REMOVE")
+OP_APPEND = util.symbol("APPEND")
+OP_REPLACE = util.symbol("REPLACE")
+OP_BULK_REPLACE = util.symbol("BULK_REPLACE")
+OP_MODIFIED = util.symbol("MODIFIED")
+
+
+class AttributeEvent(object):
+ """A token propagated throughout the course of a chain of attribute
+ events.
+
+ Serves as an indicator of the source of the event and also provides
+ a means of controlling propagation across a chain of attribute
+ operations.
+
+ The :class:`.Event` object is sent as the ``initiator`` argument
+ when dealing with events such as :meth:`.AttributeEvents.append`,
+ :meth:`.AttributeEvents.set`,
+ and :meth:`.AttributeEvents.remove`.
+
+ The :class:`.Event` object is currently interpreted by the backref
+ event handlers, and is used to control the propagation of operations
+ across two mutually-dependent attributes.
+
+ .. versionadded:: 0.9.0
+
+ :attribute impl: The :class:`.AttributeImpl` which is the current event
+ initiator.
+
+ :attribute op: The symbol :attr:`.OP_APPEND`, :attr:`.OP_REMOVE`,
+ :attr:`.OP_REPLACE`, or :attr:`.OP_BULK_REPLACE`, indicating the
+ source operation.
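+
+    E.g., a minimal listener sketch that receives this token as its
+    ``initiator`` argument; ``User.addresses`` is a hypothetical mapped
+    collection::
+
+        from sqlalchemy import event
+
+        @event.listens_for(User.addresses, "append")
+        def receive_append(target, value, initiator):
+            print(initiator.op, initiator.key)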
+
+ """
+
+ __slots__ = "impl", "op", "parent_token"
+
+ def __init__(self, attribute_impl, op):
+ self.impl = attribute_impl
+ self.op = op
+ self.parent_token = self.impl.parent_token
+
+ def __eq__(self, other):
+ return (
+ isinstance(other, AttributeEvent)
+ and other.impl is self.impl
+ and other.op == self.op
+ )
+
+ @property
+ def key(self):
+ return self.impl.key
+
+ def hasparent(self, state):
+ return self.impl.hasparent(state)
+
+
+Event = AttributeEvent
+
+
+class AttributeImpl(object):
+ """internal implementation for instrumented attributes."""
+
+ def __init__(
+ self,
+ class_,
+ key,
+ callable_,
+ dispatch,
+ trackparent=False,
+ compare_function=None,
+ active_history=False,
+ parent_token=None,
+ load_on_unexpire=True,
+ send_modified_events=True,
+ accepts_scalar_loader=None,
+ **kwargs
+ ):
+ r"""Construct an AttributeImpl.
+
+ :param \class_: associated class
+
+ :param key: string name of the attribute
+
+ :param \callable_:
+ optional function which generates a callable based on a parent
+ instance, which produces the "default" values for a scalar or
+ collection attribute when it's first accessed, if not present
+ already.
+
+ :param trackparent:
+ if True, attempt to track if an instance has a parent attached
+ to it via this attribute.
+
+ :param compare_function:
+ a function that compares two values which are normally
+ assignable to this attribute.
+
+ :param active_history:
+ indicates that get_history() should always return the "old" value,
+ even if it means executing a lazy callable upon attribute change.
+
+ :param parent_token:
+ Usually references the MapperProperty, used as a key for
+ the hasparent() function to identify an "owning" attribute.
+ Allows multiple AttributeImpls to all match a single
+ owner attribute.
+
+ :param load_on_unexpire:
+ if False, don't include this attribute in a load-on-expired
+ operation, i.e. the "expired_attribute_loader" process.
+ The attribute can still be in the "expired" list and be
+        considered to be "expired". This flag was formerly named
+        "expire_missing" and is used only by deferred column
+        attributes.
+
+ :param send_modified_events:
+ if False, the InstanceState._modified_event method will have no
+ effect; this means the attribute will never show up as changed in a
+ history entry.
+
+ """
+ self.class_ = class_
+ self.key = key
+ self.callable_ = callable_
+ self.dispatch = dispatch
+ self.trackparent = trackparent
+ self.parent_token = parent_token or self
+ self.send_modified_events = send_modified_events
+ if compare_function is None:
+ self.is_equal = operator.eq
+ else:
+ self.is_equal = compare_function
+
+ if accepts_scalar_loader is not None:
+ self.accepts_scalar_loader = accepts_scalar_loader
+ else:
+ self.accepts_scalar_loader = self.default_accepts_scalar_loader
+
+ _deferred_history = kwargs.pop("_deferred_history", False)
+ self._deferred_history = _deferred_history
+
+ if active_history:
+ self.dispatch._active_history = True
+
+ self.load_on_unexpire = load_on_unexpire
+ self._modified_token = Event(self, OP_MODIFIED)
+
+ __slots__ = (
+ "class_",
+ "key",
+ "callable_",
+ "dispatch",
+ "trackparent",
+ "parent_token",
+ "send_modified_events",
+ "is_equal",
+ "load_on_unexpire",
+ "_modified_token",
+ "accepts_scalar_loader",
+ "_deferred_history",
+ )
+
+ def __str__(self):
+ return "%s.%s" % (self.class_.__name__, self.key)
+
+ def _get_active_history(self):
+ """Backwards compat for impl.active_history"""
+
+ return self.dispatch._active_history
+
+ def _set_active_history(self, value):
+ self.dispatch._active_history = value
+
+ active_history = property(_get_active_history, _set_active_history)
+
+ def hasparent(self, state, optimistic=False):
+ """Return the boolean value of a `hasparent` flag attached to
+ the given state.
+
+ The `optimistic` flag determines what the default return value
+ should be if no `hasparent` flag can be located.
+
+ As this function is used to determine if an instance is an
+ *orphan*, instances that were loaded from storage should be
+ assumed to not be orphans, until a True/False value for this
+ flag is set.
+
+ An instance attribute that is loaded by a callable function
+ will also not have a `hasparent` flag.
+
+ """
+ msg = "This AttributeImpl is not configured to track parents."
+ assert self.trackparent, msg
+
+ return (
+ state.parents.get(id(self.parent_token), optimistic) is not False
+ )
+
+ def sethasparent(self, state, parent_state, value):
+ """Set a boolean flag on the given item corresponding to
+ whether or not it is attached to a parent object via the
+ attribute represented by this ``InstrumentedAttribute``.
+
+ """
+ msg = "This AttributeImpl is not configured to track parents."
+ assert self.trackparent, msg
+
+ id_ = id(self.parent_token)
+ if value:
+ state.parents[id_] = parent_state
+ else:
+ if id_ in state.parents:
+ last_parent = state.parents[id_]
+
+ if (
+ last_parent is not False
+ and last_parent.key != parent_state.key
+ ):
+
+ if last_parent.obj() is None:
+ raise orm_exc.StaleDataError(
+ "Removing state %s from parent "
+ "state %s along attribute '%s', "
+ "but the parent record "
+ "has gone stale, can't be sure this "
+ "is the most recent parent."
+ % (
+ state_str(state),
+ state_str(parent_state),
+ self.key,
+ )
+ )
+
+ return
+
+ state.parents[id_] = False
+
+ def get_history(self, state, dict_, passive=PASSIVE_OFF):
+ raise NotImplementedError()
+
+ def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
+ """Return a list of tuples of (state, obj)
+ for all objects in this attribute's current state
+ + history.
+
+ Only applies to object-based attributes.
+
+ This is an inlining of existing functionality
+ which roughly corresponds to:
+
+ get_state_history(
+ state,
+ key,
+ passive=PASSIVE_NO_INITIALIZE).sum()
+
+ """
+ raise NotImplementedError()
+
+ def _default_value(self, state, dict_):
+ """Produce an empty value for an uninitialized scalar attribute."""
+
+ assert self.key not in dict_, (
+ "_default_value should only be invoked for an "
+ "uninitialized or expired attribute"
+ )
+
+ value = None
+ for fn in self.dispatch.init_scalar:
+ ret = fn(state, value, dict_)
+ if ret is not ATTR_EMPTY:
+ value = ret
+
+ return value
+
+ def get(self, state, dict_, passive=PASSIVE_OFF):
+        """Retrieve a value from the given object.
+
+        If a callable is assembled on this object's attribute, and
+        passive is False, the callable will be executed and the
+        resulting value will be set as the new value for this attribute.
+        """
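+        # flow sketch: a value already present in dict_ is returned
+        # directly; otherwise loader callables may fire depending on the
+        # passive flags, and finally a default value may be produced.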
+ if self.key in dict_:
+ return dict_[self.key]
+ else:
+ # if history present, don't load
+ key = self.key
+ if (
+ key not in state.committed_state
+ or state.committed_state[key] is NO_VALUE
+ ):
+ if not passive & CALLABLES_OK:
+ return PASSIVE_NO_RESULT
+
+ value = self._fire_loader_callables(state, key, passive)
+
+ if value is PASSIVE_NO_RESULT or value is NO_VALUE:
+ return value
+ elif value is ATTR_WAS_SET:
+ try:
+ return dict_[key]
+ except KeyError as err:
+ # TODO: no test coverage here.
+ util.raise_(
+ KeyError(
+ "Deferred loader for attribute "
+ "%r failed to populate "
+ "correctly" % key
+ ),
+ replace_context=err,
+ )
+ elif value is not ATTR_EMPTY:
+ return self.set_committed_value(state, dict_, value)
+
+ if not passive & INIT_OK:
+ return NO_VALUE
+ else:
+ return self._default_value(state, dict_)
+
+ def _fire_loader_callables(self, state, key, passive):
+ if (
+ self.accepts_scalar_loader
+ and self.load_on_unexpire
+ and key in state.expired_attributes
+ ):
+ return state._load_expired(state, passive)
+ elif key in state.callables:
+ callable_ = state.callables[key]
+ return callable_(state, passive)
+ elif self.callable_:
+ return self.callable_(state, passive)
+ else:
+ return ATTR_EMPTY
+
+ def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
+ self.set(state, dict_, value, initiator, passive=passive)
+
+ def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
+ self.set(
+ state, dict_, None, initiator, passive=passive, check_old=value
+ )
+
+ def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
+ self.set(
+ state,
+ dict_,
+ None,
+ initiator,
+ passive=passive,
+ check_old=value,
+ pop=True,
+ )
+
+ def set(
+ self,
+ state,
+ dict_,
+ value,
+ initiator,
+ passive=PASSIVE_OFF,
+ check_old=None,
+ pop=False,
+ ):
+ raise NotImplementedError()
+
+ def get_committed_value(self, state, dict_, passive=PASSIVE_OFF):
+ """return the unchanged value of this attribute"""
+
+ if self.key in state.committed_state:
+ value = state.committed_state[self.key]
+ if value is NO_VALUE:
+ return None
+ else:
+ return value
+ else:
+ return self.get(state, dict_, passive=passive)
+
+ def set_committed_value(self, state, dict_, value):
+ """set an attribute value on the given instance and 'commit' it."""
+
+ dict_[self.key] = value
+ state._commit(dict_, [self.key])
+ return value
+
+
+class ScalarAttributeImpl(AttributeImpl):
+ """represents a scalar value-holding InstrumentedAttribute."""
+
+ default_accepts_scalar_loader = True
+ uses_objects = False
+ supports_population = True
+ collection = False
+ dynamic = False
+
+ __slots__ = "_replace_token", "_append_token", "_remove_token"
+
+ def __init__(self, *arg, **kw):
+ super(ScalarAttributeImpl, self).__init__(*arg, **kw)
+ self._replace_token = self._append_token = Event(self, OP_REPLACE)
+ self._remove_token = Event(self, OP_REMOVE)
+
+ def delete(self, state, dict_):
+ if self.dispatch._active_history:
+ old = self.get(state, dict_, PASSIVE_RETURN_NO_VALUE)
+ else:
+ old = dict_.get(self.key, NO_VALUE)
+
+ if self.dispatch.remove:
+ self.fire_remove_event(state, dict_, old, self._remove_token)
+ state._modified_event(dict_, self, old)
+
+ existing = dict_.pop(self.key, NO_VALUE)
+ if (
+ existing is NO_VALUE
+ and old is NO_VALUE
+ and not state.expired
+ and self.key not in state.expired_attributes
+ ):
+ raise AttributeError("%s object does not have a value" % self)
+
+ def get_history(self, state, dict_, passive=PASSIVE_OFF):
+ if self.key in dict_:
+ return History.from_scalar_attribute(self, state, dict_[self.key])
+ elif self.key in state.committed_state:
+ return History.from_scalar_attribute(self, state, NO_VALUE)
+ else:
+ if passive & INIT_OK:
+ passive ^= INIT_OK
+ current = self.get(state, dict_, passive=passive)
+ if current is PASSIVE_NO_RESULT:
+ return HISTORY_BLANK
+ else:
+ return History.from_scalar_attribute(self, state, current)
+
+ def set(
+ self,
+ state,
+ dict_,
+ value,
+ initiator,
+ passive=PASSIVE_OFF,
+ check_old=None,
+ pop=False,
+ ):
+ if self.dispatch._active_history:
+ old = self.get(state, dict_, PASSIVE_RETURN_NO_VALUE)
+ else:
+ old = dict_.get(self.key, NO_VALUE)
+
+ if self.dispatch.set:
+ value = self.fire_replace_event(
+ state, dict_, value, old, initiator
+ )
+ state._modified_event(dict_, self, old)
+ dict_[self.key] = value
+
+ def fire_replace_event(self, state, dict_, value, previous, initiator):
+ for fn in self.dispatch.set:
+ value = fn(
+ state, value, previous, initiator or self._replace_token
+ )
+ return value
+
+ def fire_remove_event(self, state, dict_, value, initiator):
+ for fn in self.dispatch.remove:
+ fn(state, value, initiator or self._remove_token)
+
+ @property
+ def type(self):
+        return self.property.columns[0].type
+
+
+class ScalarObjectAttributeImpl(ScalarAttributeImpl):
+ """represents a scalar-holding InstrumentedAttribute,
+ where the target object is also instrumented.
+
+ Adds events to delete/set operations.
+
+ """
+
+ default_accepts_scalar_loader = False
+ uses_objects = True
+ supports_population = True
+ collection = False
+
+ __slots__ = ()
+
+ def delete(self, state, dict_):
+ if self.dispatch._active_history:
+ old = self.get(
+ state,
+ dict_,
+ passive=PASSIVE_ONLY_PERSISTENT
+ | NO_AUTOFLUSH
+ | LOAD_AGAINST_COMMITTED,
+ )
+ else:
+ old = self.get(
+ state,
+ dict_,
+ passive=PASSIVE_NO_FETCH ^ INIT_OK
+ | LOAD_AGAINST_COMMITTED
+ | NO_RAISE,
+ )
+
+ self.fire_remove_event(state, dict_, old, self._remove_token)
+
+ existing = dict_.pop(self.key, NO_VALUE)
+
+ # if the attribute is expired, we currently have no way to tell
+ # that an object-attribute was expired vs. not loaded. So
+ # for this test, we look to see if the object has a DB identity.
+ if (
+ existing is NO_VALUE
+ and old is not PASSIVE_NO_RESULT
+ and state.key is None
+ ):
+ raise AttributeError("%s object does not have a value" % self)
+
+ def get_history(self, state, dict_, passive=PASSIVE_OFF):
+ if self.key in dict_:
+ current = dict_[self.key]
+ else:
+ if passive & INIT_OK:
+ passive ^= INIT_OK
+ current = self.get(state, dict_, passive=passive)
+ if current is PASSIVE_NO_RESULT:
+ return HISTORY_BLANK
+
+ if not self._deferred_history:
+ return History.from_object_attribute(self, state, current)
+ else:
+ original = state.committed_state.get(self.key, _NO_HISTORY)
+ if original is PASSIVE_NO_RESULT:
+
+ loader_passive = passive | (
+ PASSIVE_ONLY_PERSISTENT
+ | NO_AUTOFLUSH
+ | LOAD_AGAINST_COMMITTED
+ | NO_RAISE
+ | DEFERRED_HISTORY_LOAD
+ )
+ original = self._fire_loader_callables(
+ state, self.key, loader_passive
+ )
+ return History.from_object_attribute(
+ self, state, current, original=original
+ )
+
+ def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
+ if self.key in dict_:
+ current = dict_[self.key]
+ elif passive & CALLABLES_OK:
+ current = self.get(state, dict_, passive=passive)
+ else:
+ return []
+
+ # can't use __hash__(), can't use __eq__() here
+ if (
+ current is not None
+ and current is not PASSIVE_NO_RESULT
+ and current is not NO_VALUE
+ ):
+ ret = [(instance_state(current), current)]
+ else:
+ ret = [(None, None)]
+
+ if self.key in state.committed_state:
+ original = state.committed_state[self.key]
+ if (
+ original is not None
+ and original is not PASSIVE_NO_RESULT
+ and original is not NO_VALUE
+ and original is not current
+ ):
+
+ ret.append((instance_state(original), original))
+ return ret
+
+ def set(
+ self,
+ state,
+ dict_,
+ value,
+ initiator,
+ passive=PASSIVE_OFF,
+ check_old=None,
+ pop=False,
+ ):
+ """Set a value on the given InstanceState."""
+
+ if self.dispatch._active_history:
+ old = self.get(
+ state,
+ dict_,
+ passive=PASSIVE_ONLY_PERSISTENT
+ | NO_AUTOFLUSH
+ | LOAD_AGAINST_COMMITTED,
+ )
+ else:
+ old = self.get(
+ state,
+ dict_,
+ passive=PASSIVE_NO_FETCH ^ INIT_OK
+ | LOAD_AGAINST_COMMITTED
+ | NO_RAISE,
+ )
+
+ if (
+ check_old is not None
+ and old is not PASSIVE_NO_RESULT
+ and check_old is not old
+ ):
+ if pop:
+ return
+ else:
+ raise ValueError(
+ "Object %s not associated with %s on attribute '%s'"
+ % (instance_str(check_old), state_str(state), self.key)
+ )
+
+ value = self.fire_replace_event(state, dict_, value, old, initiator)
+ dict_[self.key] = value
+
+ def fire_remove_event(self, state, dict_, value, initiator):
+ if self.trackparent and value not in (
+ None,
+ PASSIVE_NO_RESULT,
+ NO_VALUE,
+ ):
+ self.sethasparent(instance_state(value), state, False)
+
+ for fn in self.dispatch.remove:
+ fn(state, value, initiator or self._remove_token)
+
+ state._modified_event(dict_, self, value)
+
+ def fire_replace_event(self, state, dict_, value, previous, initiator):
+ if self.trackparent:
+ if previous is not value and previous not in (
+ None,
+ PASSIVE_NO_RESULT,
+ NO_VALUE,
+ ):
+ self.sethasparent(instance_state(previous), state, False)
+
+ for fn in self.dispatch.set:
+ value = fn(
+ state, value, previous, initiator or self._replace_token
+ )
+
+ state._modified_event(dict_, self, previous)
+
+ if self.trackparent:
+ if value is not None:
+ self.sethasparent(instance_state(value), state, True)
+
+ return value
+
+
+class CollectionAttributeImpl(AttributeImpl):
+ """A collection-holding attribute that instruments changes in membership.
+
+ Only handles collections of instrumented objects.
+
+ InstrumentedCollectionAttribute holds an arbitrary, user-specified
+ container object (defaulting to a list) and brokers access to the
+ CollectionAdapter, a "view" onto that object that presents consistent bag
+    semantics to the ORM layer independent of the user data implementation.
+
+ """
+
+ default_accepts_scalar_loader = False
+ uses_objects = True
+ supports_population = True
+ collection = True
+ dynamic = False
+
+ __slots__ = (
+ "copy",
+ "collection_factory",
+ "_append_token",
+ "_remove_token",
+ "_bulk_replace_token",
+ "_duck_typed_as",
+ )
+
+ def __init__(
+ self,
+ class_,
+ key,
+ callable_,
+ dispatch,
+ typecallable=None,
+ trackparent=False,
+ copy_function=None,
+ compare_function=None,
+ **kwargs
+ ):
+ super(CollectionAttributeImpl, self).__init__(
+ class_,
+ key,
+ callable_,
+ dispatch,
+ trackparent=trackparent,
+ compare_function=compare_function,
+ **kwargs
+ )
+
+ if copy_function is None:
+ copy_function = self.__copy
+ self.copy = copy_function
+ self.collection_factory = typecallable
+ self._append_token = Event(self, OP_APPEND)
+ self._remove_token = Event(self, OP_REMOVE)
+ self._bulk_replace_token = Event(self, OP_BULK_REPLACE)
+ self._duck_typed_as = util.duck_type_collection(
+ self.collection_factory()
+ )
+
+ if getattr(self.collection_factory, "_sa_linker", None):
+
+ @event.listens_for(self, "init_collection")
+ def link(target, collection, collection_adapter):
+ collection._sa_linker(collection_adapter)
+
+ @event.listens_for(self, "dispose_collection")
+ def unlink(target, collection, collection_adapter):
+ collection._sa_linker(None)
+
+ def __copy(self, item):
+ return [y for y in collections.collection_adapter(item)]
+
+ def get_history(self, state, dict_, passive=PASSIVE_OFF):
+ current = self.get(state, dict_, passive=passive)
+ if current is PASSIVE_NO_RESULT:
+ return HISTORY_BLANK
+ else:
+ return History.from_collection(self, state, current)
+
+ def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE):
+ # NOTE: passive is ignored here at the moment
+
+ if self.key not in dict_:
+ return []
+
+ current = dict_[self.key]
+ current = getattr(current, "_sa_adapter")
+
+ if self.key in state.committed_state:
+ original = state.committed_state[self.key]
+ if original is not NO_VALUE:
+ current_states = [
+ ((c is not None) and instance_state(c) or None, c)
+ for c in current
+ ]
+ original_states = [
+ ((c is not None) and instance_state(c) or None, c)
+ for c in original
+ ]
+
+ current_set = dict(current_states)
+ original_set = dict(original_states)
+
+ return (
+ [
+ (s, o)
+ for s, o in current_states
+ if s not in original_set
+ ]
+ + [(s, o) for s, o in current_states if s in original_set]
+ + [
+ (s, o)
+ for s, o in original_states
+ if s not in current_set
+ ]
+ )
+
+ return [(instance_state(o), o) for o in current]
+
+ def fire_append_event(self, state, dict_, value, initiator):
+ for fn in self.dispatch.append:
+ value = fn(state, value, initiator or self._append_token)
+
+ state._modified_event(dict_, self, NO_VALUE, True)
+
+ if self.trackparent and value is not None:
+ self.sethasparent(instance_state(value), state, True)
+
+ return value
+
+ def fire_append_wo_mutation_event(self, state, dict_, value, initiator):
+ for fn in self.dispatch.append_wo_mutation:
+ value = fn(state, value, initiator or self._append_token)
+
+ return value
+
+ def fire_pre_remove_event(self, state, dict_, initiator):
+ """A special event used for pop() operations.
+
+        The "remove" event needs to have the item to be removed passed to
+        it; in the case of pop from a set, there is no way to access the
+        item before the operation. The event is used for all pop()
+        operations (even though set.pop is the one where it is really
+        needed).
+
+ """
+ state._modified_event(dict_, self, NO_VALUE, True)
+
+ def fire_remove_event(self, state, dict_, value, initiator):
+ if self.trackparent and value is not None:
+ self.sethasparent(instance_state(value), state, False)
+
+ for fn in self.dispatch.remove:
+ fn(state, value, initiator or self._remove_token)
+
+ state._modified_event(dict_, self, NO_VALUE, True)
+
+ def delete(self, state, dict_):
+ if self.key not in dict_:
+ return
+
+ state._modified_event(dict_, self, NO_VALUE, True)
+
+ collection = self.get_collection(state, state.dict)
+ collection.clear_with_event()
+
+        # the key is always present here because of the check above;
+        # delete() is a no-op if the collection is not present.
+ del dict_[self.key]
+
+ def _default_value(self, state, dict_):
+        """Produce an empty collection for an uninitialized attribute."""
+
+ assert self.key not in dict_, (
+ "_default_value should only be invoked for an "
+ "uninitialized or expired attribute"
+ )
+
+ if self.key in state._empty_collections:
+ return state._empty_collections[self.key]
+
+ adapter, user_data = self._initialize_collection(state)
+ adapter._set_empty(user_data)
+ return user_data
+
+ def _initialize_collection(self, state):
+
+ adapter, collection = state.manager.initialize_collection(
+ self.key, state, self.collection_factory
+ )
+
+ self.dispatch.init_collection(state, collection, adapter)
+
+ return adapter, collection
+
+ def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
+ collection = self.get_collection(state, dict_, passive=passive)
+ if collection is PASSIVE_NO_RESULT:
+ value = self.fire_append_event(state, dict_, value, initiator)
+ assert (
+ self.key not in dict_
+ ), "Collection was loaded during event handling."
+ state._get_pending_mutation(self.key).append(value)
+ else:
+ collection.append_with_event(value, initiator)
+
+ def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
+ collection = self.get_collection(state, state.dict, passive=passive)
+ if collection is PASSIVE_NO_RESULT:
+ self.fire_remove_event(state, dict_, value, initiator)
+ assert (
+ self.key not in dict_
+ ), "Collection was loaded during event handling."
+ state._get_pending_mutation(self.key).remove(value)
+ else:
+ collection.remove_with_event(value, initiator)
+
+ def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
+ try:
+ # TODO: better solution here would be to add
+ # a "popper" role to collections.py to complement
+ # "remover".
+ self.remove(state, dict_, value, initiator, passive=passive)
+ except (ValueError, KeyError, IndexError):
+ pass
+
+ def set(
+ self,
+ state,
+ dict_,
+ value,
+ initiator=None,
+ passive=PASSIVE_OFF,
+ check_old=None,
+ pop=False,
+ _adapt=True,
+ ):
+ iterable = orig_iterable = value
+
+ # pulling a new collection first so that an adaptation exception does
+ # not trigger a lazy load of the old collection.
+ new_collection, user_data = self._initialize_collection(state)
+ if _adapt:
+ if new_collection._converter is not None:
+ iterable = new_collection._converter(iterable)
+ else:
+ setting_type = util.duck_type_collection(iterable)
+ receiving_type = self._duck_typed_as
+
+ if setting_type is not receiving_type:
+ given = (
+ iterable is None
+ and "None"
+ or iterable.__class__.__name__
+ )
+ wanted = self._duck_typed_as.__name__
+ raise TypeError(
+ "Incompatible collection type: %s is not %s-like"
+ % (given, wanted)
+ )
+
+ # If the object is an adapted collection, return the (iterable)
+ # adapter.
+ if hasattr(iterable, "_sa_iterator"):
+ iterable = iterable._sa_iterator()
+ elif setting_type is dict:
+ if util.py3k:
+ iterable = iterable.values()
+ else:
+ iterable = getattr(
+ iterable, "itervalues", iterable.values
+ )()
+ else:
+ iterable = iter(iterable)
+ new_values = list(iterable)
+
+ evt = self._bulk_replace_token
+
+ self.dispatch.bulk_replace(state, new_values, evt)
+
+ old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT)
+ if old is PASSIVE_NO_RESULT:
+ old = self._default_value(state, dict_)
+ elif old is orig_iterable:
+ # ignore re-assignment of the current collection, as happens
+ # implicitly with in-place operators (foo.collection |= other)
+ return
+
+ # place a copy of "old" in state.committed_state
+ state._modified_event(dict_, self, old, True)
+
+ old_collection = old._sa_adapter
+
+ dict_[self.key] = user_data
+
+ collections.bulk_replace(
+ new_values, old_collection, new_collection, initiator=evt
+ )
+
+ self._dispose_previous_collection(state, old, old_collection, True)
+
+ def _dispose_previous_collection(
+ self, state, collection, adapter, fire_event
+ ):
+ del collection._sa_adapter
+
+        # when discarding the old collection, make sure it is not
+        # referenced in the state's empty collections.
+ state._empty_collections.pop(self.key, None)
+ if fire_event:
+ self.dispatch.dispose_collection(state, collection, adapter)
+
+ def _invalidate_collection(self, collection):
+ adapter = getattr(collection, "_sa_adapter")
+ adapter.invalidated = True
+
+ def set_committed_value(self, state, dict_, value):
+ """Set an attribute value on the given instance and 'commit' it."""
+
+ collection, user_data = self._initialize_collection(state)
+
+ if value:
+ collection.append_multiple_without_event(value)
+
+ state.dict[self.key] = user_data
+
+ state._commit(dict_, [self.key])
+
+ if self.key in state._pending_mutations:
+ # pending items exist. issue a modified event,
+ # add/remove new items.
+ state._modified_event(dict_, self, user_data, True)
+
+ pending = state._pending_mutations.pop(self.key)
+ added = pending.added_items
+ removed = pending.deleted_items
+ for item in added:
+ collection.append_without_event(item)
+ for item in removed:
+ collection.remove_without_event(item)
+
+ return user_data
+
+ def get_collection(
+ self, state, dict_, user_data=None, passive=PASSIVE_OFF
+ ):
+ """Retrieve the CollectionAdapter associated with the given state.
+
+        If user_data is None, retrieves it from the state using normal
+ "get()" rules, which will fire lazy callables or return the "empty"
+ collection value.
+
+ """
+ if user_data is None:
+ user_data = self.get(state, dict_, passive=passive)
+ if user_data is PASSIVE_NO_RESULT:
+ return user_data
+
+ return user_data._sa_adapter
+
+
+def backref_listeners(attribute, key, uselist):
+ """Apply listeners to synchronize a two-way relationship."""
+
+ # use easily recognizable names for stack traces.
+
+ # in the sections marked "tokens to test for a recursive loop",
+ # this is somewhat brittle and very performance-sensitive logic
+ # that is specific to how we might arrive at each event. a marker
+ # that can target us directly to arguments being invoked against
+ # the impl might be simpler, but could interfere with other systems.
+
+ parent_token = attribute.impl.parent_token
+ parent_impl = attribute.impl
+
+ def _acceptable_key_err(child_state, initiator, child_impl):
+ raise ValueError(
+ "Bidirectional attribute conflict detected: "
+ 'Passing object %s to attribute "%s" '
+ 'triggers a modify event on attribute "%s" '
+ 'via the backref "%s".'
+ % (
+ state_str(child_state),
+ initiator.parent_token,
+ child_impl.parent_token,
+ attribute.impl.parent_token,
+ )
+ )
+
+ def emit_backref_from_scalar_set_event(state, child, oldchild, initiator):
+ if oldchild is child:
+ return child
+ if (
+ oldchild is not None
+ and oldchild is not PASSIVE_NO_RESULT
+ and oldchild is not NO_VALUE
+ ):
+ # With lazy=None, there's no guarantee that the full collection is
+ # present when updating via a backref.
+ old_state, old_dict = (
+ instance_state(oldchild),
+ instance_dict(oldchild),
+ )
+ impl = old_state.manager[key].impl
+
+ # tokens to test for a recursive loop.
+ if not impl.collection and not impl.dynamic:
+ check_recursive_token = impl._replace_token
+ else:
+ check_recursive_token = impl._remove_token
+
+ if initiator is not check_recursive_token:
+ impl.pop(
+ old_state,
+ old_dict,
+ state.obj(),
+ parent_impl._append_token,
+ passive=PASSIVE_NO_FETCH,
+ )
+
+ if child is not None:
+ child_state, child_dict = (
+ instance_state(child),
+ instance_dict(child),
+ )
+ child_impl = child_state.manager[key].impl
+
+ if (
+ initiator.parent_token is not parent_token
+ and initiator.parent_token is not child_impl.parent_token
+ ):
+ _acceptable_key_err(state, initiator, child_impl)
+
+ # tokens to test for a recursive loop.
+ check_append_token = child_impl._append_token
+ check_bulk_replace_token = (
+ child_impl._bulk_replace_token
+ if child_impl.collection
+ else None
+ )
+
+ if (
+ initiator is not check_append_token
+ and initiator is not check_bulk_replace_token
+ ):
+ child_impl.append(
+ child_state,
+ child_dict,
+ state.obj(),
+ initiator,
+ passive=PASSIVE_NO_FETCH,
+ )
+ return child
+
+ def emit_backref_from_collection_append_event(state, child, initiator):
+ if child is None:
+ return
+
+ child_state, child_dict = instance_state(child), instance_dict(child)
+ child_impl = child_state.manager[key].impl
+
+ if (
+ initiator.parent_token is not parent_token
+ and initiator.parent_token is not child_impl.parent_token
+ ):
+ _acceptable_key_err(state, initiator, child_impl)
+
+ # tokens to test for a recursive loop.
+ check_append_token = child_impl._append_token
+ check_bulk_replace_token = (
+ child_impl._bulk_replace_token if child_impl.collection else None
+ )
+
+ if (
+ initiator is not check_append_token
+ and initiator is not check_bulk_replace_token
+ ):
+ child_impl.append(
+ child_state,
+ child_dict,
+ state.obj(),
+ initiator,
+ passive=PASSIVE_NO_FETCH,
+ )
+ return child
+
+ def emit_backref_from_collection_remove_event(state, child, initiator):
+ if (
+ child is not None
+ and child is not PASSIVE_NO_RESULT
+ and child is not NO_VALUE
+ ):
+ child_state, child_dict = (
+ instance_state(child),
+ instance_dict(child),
+ )
+ child_impl = child_state.manager[key].impl
+
+ # tokens to test for a recursive loop.
+ if not child_impl.collection and not child_impl.dynamic:
+ check_remove_token = child_impl._remove_token
+ check_replace_token = child_impl._replace_token
+ check_for_dupes_on_remove = uselist and not parent_impl.dynamic
+ else:
+ check_remove_token = child_impl._remove_token
+ check_replace_token = (
+ child_impl._bulk_replace_token
+ if child_impl.collection
+ else None
+ )
+ check_for_dupes_on_remove = False
+
+ if (
+ initiator is not check_remove_token
+ and initiator is not check_replace_token
+ ):
+
+ if not check_for_dupes_on_remove or not util.has_dupes(
+ # when this event is called, the item is usually
+ # present in the list, except for a pop() operation.
+ state.dict[parent_impl.key],
+ child,
+ ):
+ child_impl.pop(
+ child_state,
+ child_dict,
+ state.obj(),
+ initiator,
+ passive=PASSIVE_NO_FETCH,
+ )
+
+ if uselist:
+ event.listen(
+ attribute,
+ "append",
+ emit_backref_from_collection_append_event,
+ retval=True,
+ raw=True,
+ )
+ else:
+ event.listen(
+ attribute,
+ "set",
+ emit_backref_from_scalar_set_event,
+ retval=True,
+ raw=True,
+ )
+ # TODO: need coverage in test/orm/ of remove event
+ event.listen(
+ attribute,
+ "remove",
+ emit_backref_from_collection_remove_event,
+ retval=True,
+ raw=True,
+ )
+
+
+_NO_HISTORY = util.symbol("NO_HISTORY")
+_NO_STATE_SYMBOLS = frozenset([id(PASSIVE_NO_RESULT), id(NO_VALUE)])
+
+
+class History(util.namedtuple("History", ["added", "unchanged", "deleted"])):
+ """A 3-tuple of added, unchanged and deleted values,
+ representing the changes which have occurred on an instrumented
+ attribute.
+
+ The easiest way to get a :class:`.History` object for a particular
+ attribute on an object is to use the :func:`_sa.inspect` function::
+
+ from sqlalchemy import inspect
+
+ hist = inspect(myobject).attrs.myattribute.history
+
+ Each tuple member is an iterable sequence:
+
+ * ``added`` - the collection of items added to the attribute (the first
+ tuple element).
+
+ * ``unchanged`` - the collection of items that have not changed on the
+ attribute (the second tuple element).
+
+ * ``deleted`` - the collection of items that have been removed from the
+ attribute (the third tuple element).
+
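+    For example, if a scalar attribute on a persistent object is changed
+    from ``"old"`` to ``"new"``, the resulting history is roughly
+    ``History(added=["new"], unchanged=(), deleted=["old"])``.
+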
+ """
+
+ def __bool__(self):
+ return self != HISTORY_BLANK
+
+ __nonzero__ = __bool__
+
+ def empty(self):
+ """Return True if this :class:`.History` has no changes
+ and no existing, unchanged state.
+
+ """
+
+ return not bool((self.added or self.deleted) or self.unchanged)
+
+ def sum(self):
+ """Return a collection of added + unchanged + deleted."""
+
+ return (
+ (self.added or []) + (self.unchanged or []) + (self.deleted or [])
+ )
+
+ def non_deleted(self):
+ """Return a collection of added + unchanged."""
+
+ return (self.added or []) + (self.unchanged or [])
+
+ def non_added(self):
+ """Return a collection of unchanged + deleted."""
+
+ return (self.unchanged or []) + (self.deleted or [])
+
+ def has_changes(self):
+ """Return True if this :class:`.History` has changes."""
+
+ return bool(self.added or self.deleted)
+
+ def as_state(self):
+ return History(
+ [
+ (c is not None) and instance_state(c) or None
+ for c in self.added
+ ],
+ [
+ (c is not None) and instance_state(c) or None
+ for c in self.unchanged
+ ],
+ [
+ (c is not None) and instance_state(c) or None
+ for c in self.deleted
+ ],
+ )
+
+ @classmethod
+ def from_scalar_attribute(cls, attribute, state, current):
+ original = state.committed_state.get(attribute.key, _NO_HISTORY)
+
+ if original is _NO_HISTORY:
+ if current is NO_VALUE:
+ return cls((), (), ())
+ else:
+ return cls((), [current], ())
+ # don't let ClauseElement expressions here trip things up
+ elif (
+ current is not NO_VALUE
+ and attribute.is_equal(current, original) is True
+ ):
+ return cls((), [current], ())
+ else:
+ # current convention on native scalars is to not
+ # include information
+ # about missing previous value in "deleted", but
+ # we do include None, which helps in some primary
+ # key situations
+ if id(original) in _NO_STATE_SYMBOLS:
+ deleted = ()
+ # indicate a "del" operation occurred when we don't have
+ # the previous value as: ([None], (), ())
+ if id(current) in _NO_STATE_SYMBOLS:
+ current = None
+ else:
+ deleted = [original]
+ if current is NO_VALUE:
+ return cls((), (), deleted)
+ else:
+ return cls([current], (), deleted)
+
+ @classmethod
+ def from_object_attribute(
+ cls, attribute, state, current, original=_NO_HISTORY
+ ):
+ if original is _NO_HISTORY:
+ original = state.committed_state.get(attribute.key, _NO_HISTORY)
+
+ if original is _NO_HISTORY:
+ if current is NO_VALUE:
+ return cls((), (), ())
+ else:
+ return cls((), [current], ())
+ elif current is original and current is not NO_VALUE:
+ return cls((), [current], ())
+ else:
+ # current convention on related objects is to not
+ # include information
+ # about missing previous value in "deleted", and
+ # to also not include None - the dependency.py rules
+ # ignore the None in any case.
+ if id(original) in _NO_STATE_SYMBOLS or original is None:
+ deleted = ()
+ # indicate a "del" operation occurred when we don't have
+ # the previous value as: ([None], (), ())
+ if id(current) in _NO_STATE_SYMBOLS:
+ current = None
+ else:
+ deleted = [original]
+ if current is NO_VALUE:
+ return cls((), (), deleted)
+ else:
+ return cls([current], (), deleted)
+
+ @classmethod
+ def from_collection(cls, attribute, state, current):
+ original = state.committed_state.get(attribute.key, _NO_HISTORY)
+ if current is NO_VALUE:
+ return cls((), (), ())
+
+ current = getattr(current, "_sa_adapter")
+ if original is NO_VALUE:
+ return cls(list(current), (), ())
+ elif original is _NO_HISTORY:
+ return cls((), list(current), ())
+ else:
+
+ current_states = [
+ ((c is not None) and instance_state(c) or None, c)
+ for c in current
+ ]
+ original_states = [
+ ((c is not None) and instance_state(c) or None, c)
+ for c in original
+ ]
+
+ current_set = dict(current_states)
+ original_set = dict(original_states)
+
+ return cls(
+ [o for s, o in current_states if s not in original_set],
+ [o for s, o in current_states if s in original_set],
+ [o for s, o in original_states if s not in current_set],
+ )
+
+
+HISTORY_BLANK = History(None, None, None)
+
+
+def get_history(obj, key, passive=PASSIVE_OFF):
+ """Return a :class:`.History` record for the given object
+ and attribute key.
+
+ This is the **pre-flush** history for a given attribute, which is
+ reset each time the :class:`.Session` flushes changes to the
+ current database transaction.
+
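+    For example, given a hypothetical mapped object ``someobject`` with an
+    instrumented attribute ``someattribute``, a sketch of usage might be::
+
+        from sqlalchemy.orm.attributes import get_history
+
+        hist = get_history(someobject, "someattribute")
+        if hist.has_changes():
+            print("added:", hist.added, "deleted:", hist.deleted)
+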
+ .. note::
+
+ Prefer to use the :attr:`.AttributeState.history` and
+ :meth:`.AttributeState.load_history` accessors to retrieve the
+ :class:`.History` for instance attributes.
+
+
+ :param obj: an object whose class is instrumented by the
+ attributes package.
+
+ :param key: string attribute name.
+
+ :param passive: indicates loading behavior for the attribute
+ if the value is not already present. This is a
+ bitflag attribute, which defaults to the symbol
+ :attr:`.PASSIVE_OFF` indicating all necessary SQL
+ should be emitted.
+
+ .. seealso::
+
+ :attr:`.AttributeState.history`
+
+ :meth:`.AttributeState.load_history` - retrieve history
+ using loader callables if the value is not locally present.
+
+ """
+
+ return get_state_history(instance_state(obj), key, passive)
+
+
+def get_state_history(state, key, passive=PASSIVE_OFF):
+ return state.get_history(key, passive)
+
+
+def has_parent(cls, obj, key, optimistic=False):
+ """TODO"""
+ manager = manager_of_class(cls)
+ state = instance_state(obj)
+ return manager.has_parent(state, key, optimistic)
+
+
+def register_attribute(class_, key, **kw):
+ comparator = kw.pop("comparator", None)
+ parententity = kw.pop("parententity", None)
+ doc = kw.pop("doc", None)
+ desc = register_descriptor(class_, key, comparator, parententity, doc=doc)
+ register_attribute_impl(class_, key, **kw)
+ return desc
+
+
+def register_attribute_impl(
+ class_,
+ key,
+ uselist=False,
+ callable_=None,
+ useobject=False,
+ impl_class=None,
+ backref=None,
+ **kw
+):
+
+ manager = manager_of_class(class_)
+ if uselist:
+ factory = kw.pop("typecallable", None)
+ typecallable = manager.instrument_collection_class(
+ key, factory or list
+ )
+ else:
+ typecallable = kw.pop("typecallable", None)
+
+ dispatch = manager[key].dispatch
+
+ if impl_class:
+ impl = impl_class(class_, key, typecallable, dispatch, **kw)
+ elif uselist:
+ impl = CollectionAttributeImpl(
+ class_, key, callable_, dispatch, typecallable=typecallable, **kw
+ )
+ elif useobject:
+ impl = ScalarObjectAttributeImpl(
+ class_, key, callable_, dispatch, **kw
+ )
+ else:
+ impl = ScalarAttributeImpl(class_, key, callable_, dispatch, **kw)
+
+ manager[key].impl = impl
+
+ if backref:
+ backref_listeners(manager[key], backref, uselist)
+
+ manager.post_configure_attribute(key)
+ return manager[key]
+
+
+def register_descriptor(
+ class_, key, comparator=None, parententity=None, doc=None
+):
+ manager = manager_of_class(class_)
+
+ descriptor = InstrumentedAttribute(
+ class_, key, comparator=comparator, parententity=parententity
+ )
+
+ descriptor.__doc__ = doc
+
+ manager.instrument_attribute(key, descriptor)
+ return descriptor
+
+
+def unregister_attribute(class_, key):
+ manager_of_class(class_).uninstrument_attribute(key)
+
+
+def init_collection(obj, key):
+ """Initialize a collection attribute and return the collection adapter.
+
+ This function is used to provide direct access to collection internals
+ for a previously unloaded attribute. e.g.::
+
+ collection_adapter = init_collection(someobject, 'elements')
+ for elem in values:
+ collection_adapter.append_without_event(elem)
+
+ For an easier way to do the above, see
+ :func:`~sqlalchemy.orm.attributes.set_committed_value`.
+
+ :param obj: a mapped object
+
+ :param key: string attribute name where the collection is located.
+
+ """
+ state = instance_state(obj)
+ dict_ = state.dict
+ return init_state_collection(state, dict_, key)
+
+
+def init_state_collection(state, dict_, key):
+ """Initialize a collection attribute and return the collection adapter.
+
+ Discards any existing collection which may be there.
+
+ """
+ attr = state.manager[key].impl
+
+ old = dict_.pop(key, None) # discard old collection
+ if old is not None:
+ old_collection = old._sa_adapter
+ attr._dispose_previous_collection(state, old, old_collection, False)
+
+ user_data = attr._default_value(state, dict_)
+ adapter = attr.get_collection(state, dict_, user_data)
+ adapter._reset_empty()
+
+ return adapter
+
+
+def set_committed_value(instance, key, value):
+ """Set the value of an attribute with no history events.
+
+ Cancels any previous history present. The value should be
+ a scalar value for scalar-holding attributes, or
+ an iterable for any collection-holding attribute.
+
+ This is the same underlying method used when a lazy loader
+ fires off and loads additional data from the database.
+ In particular, this method can be used by application code
+ which has loaded additional attributes or collections through
+ separate queries, which can then be attached to an instance
+ as though it were part of its original loaded state.
+
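+    A minimal sketch, assuming hypothetical mapped classes ``User`` and
+    ``Address`` where ``user.addresses`` was loaded by a separate query::
+
+        addresses = session.query(Address).filter_by(user_id=user.id).all()
+        set_committed_value(user, "addresses", addresses)
+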
+ """
+ state, dict_ = instance_state(instance), instance_dict(instance)
+ state.manager[key].impl.set_committed_value(state, dict_, value)
+
+
+def set_attribute(instance, key, value, initiator=None):
+ """Set the value of an attribute, firing history events.
+
+ This function may be used regardless of instrumentation
+ applied directly to the class, i.e. no descriptors are required.
+    Custom attribute management schemes will need to use
+    this method to establish attribute state as understood
+    by SQLAlchemy.
+
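+    For example, given a hypothetical mapped object ``obj``::
+
+        from sqlalchemy.orm.attributes import set_attribute
+
+        set_attribute(obj, "name", "some new name")
+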
+ :param instance: the object that will be modified
+
+ :param key: string name of the attribute
+
+ :param value: value to assign
+
+ :param initiator: an instance of :class:`.Event` that would have
+ been propagated from a previous event listener. This argument
+ is used when the :func:`.set_attribute` function is being used within
+ an existing event listening function where an :class:`.Event` object
+ is being supplied; the object may be used to track the origin of the
+ chain of events.
+
+ .. versionadded:: 1.2.3
+
+ """
+ state, dict_ = instance_state(instance), instance_dict(instance)
+ state.manager[key].impl.set(state, dict_, value, initiator)
+
+
+def get_attribute(instance, key):
+ """Get the value of an attribute, firing any callables required.
+
+ This function may be used regardless of instrumentation
+ applied directly to the class, i.e. no descriptors are required.
+    Custom attribute management schemes will need to use this
+    method in order to access attribute state as understood
+    by SQLAlchemy.
+
+ """
+ state, dict_ = instance_state(instance), instance_dict(instance)
+ return state.manager[key].impl.get(state, dict_)
+
+
+def del_attribute(instance, key):
+ """Delete the value of an attribute, firing history events.
+
+ This function may be used regardless of instrumentation
+ applied directly to the class, i.e. no descriptors are required.
+    Custom attribute management schemes will need to use
+    this method to establish attribute state as understood
+    by SQLAlchemy.
+
+ """
+ state, dict_ = instance_state(instance), instance_dict(instance)
+ state.manager[key].impl.delete(state, dict_)
+
+
+def flag_modified(instance, key):
+ """Mark an attribute on an instance as 'modified'.
+
+ This sets the 'modified' flag on the instance and
+ establishes an unconditional change event for the given attribute.
+ The attribute must have a value present, else an
+ :class:`.InvalidRequestError` is raised.
+
+ To mark an object "dirty" without referring to any specific attribute
+ so that it is considered within a flush, use the
+ :func:`.attributes.flag_dirty` call.
+
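+    For example, assuming a mapped class with a mutable ``data`` attribute
+    (such as a JSON-holding column) whose in-place changes are not
+    otherwise tracked::
+
+        from sqlalchemy.orm.attributes import flag_modified
+
+        someobject.data["key"] = "newvalue"  # no change event is fired
+        flag_modified(someobject, "data")  # marks "data" as changed
+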
+ .. seealso::
+
+ :func:`.attributes.flag_dirty`
+
+ """
+ state, dict_ = instance_state(instance), instance_dict(instance)
+ impl = state.manager[key].impl
+ impl.dispatch.modified(state, impl._modified_token)
+ state._modified_event(dict_, impl, NO_VALUE, is_userland=True)
+
+
+def flag_dirty(instance):
+ """Mark an instance as 'dirty' without any specific attribute mentioned.
+
+ This is a special operation that will allow the object to travel through
+ the flush process for interception by events such as
+ :meth:`.SessionEvents.before_flush`. Note that no SQL will be emitted in
+ the flush process for an object that has no changes, even if marked dirty
+ via this method. However, a :meth:`.SessionEvents.before_flush` handler
+ will be able to see the object in the :attr:`.Session.dirty` collection and
+ may establish changes on it, which will then be included in the SQL
+ emitted.
+
+ .. versionadded:: 1.2
+
+ .. seealso::
+
+ :func:`.attributes.flag_modified`
+
+ """
+
+ state, dict_ = instance_state(instance), instance_dict(instance)
+ state._modified_event(dict_, None, NO_VALUE, is_userland=True)
diff --git a/lib/sqlalchemy/orm/base.py b/lib/sqlalchemy/orm/base.py
new file mode 100644
index 0000000..8e94d7b
--- /dev/null
+++ b/lib/sqlalchemy/orm/base.py
@@ -0,0 +1,572 @@
+# orm/base.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""Constants and rudimental functions used throughout the ORM.
+
+"""
+
+import operator
+
+from . import exc
+from .. import exc as sa_exc
+from .. import inspection
+from .. import util
+
+
+PASSIVE_NO_RESULT = util.symbol(
+ "PASSIVE_NO_RESULT",
+ """Symbol returned by a loader callable or other attribute/history
+ retrieval operation when a value could not be determined, based
+ on loader callable flags.
+ """,
+)
+
+PASSIVE_CLASS_MISMATCH = util.symbol(
+ "PASSIVE_CLASS_MISMATCH",
+ """Symbol indicating that an object is locally present for a given
+ primary key identity but it is not of the requested class. The
+ return value is therefore None and no SQL should be emitted.""",
+)
+
+ATTR_WAS_SET = util.symbol(
+ "ATTR_WAS_SET",
+ """Symbol returned by a loader callable to indicate the
+ retrieved value, or values, were assigned to their attributes
+ on the target object.
+ """,
+)
+
+ATTR_EMPTY = util.symbol(
+ "ATTR_EMPTY",
+ """Symbol used internally to indicate an attribute had no callable.""",
+)
+
+NO_VALUE = util.symbol(
+ "NO_VALUE",
+ """Symbol which may be placed as the 'previous' value of an attribute,
+ indicating no value was loaded for an attribute when it was modified,
+ and flags indicated we were not to load it.
+ """,
+)
+NEVER_SET = NO_VALUE
+"""
+Synonymous with NO_VALUE
+
+.. versionchanged:: 1.4 NEVER_SET was merged with NO_VALUE
+"""
+
+NO_CHANGE = util.symbol(
+ "NO_CHANGE",
+ """No callables or SQL should be emitted on attribute access
+ and no state should change
+ """,
+ canonical=0,
+)
+
+CALLABLES_OK = util.symbol(
+ "CALLABLES_OK",
+ """Loader callables can be fired off if a value
+ is not present.
+ """,
+ canonical=1,
+)
+
+SQL_OK = util.symbol(
+ "SQL_OK",
+ """Loader callables can emit SQL at least on scalar value attributes.""",
+ canonical=2,
+)
+
+RELATED_OBJECT_OK = util.symbol(
+ "RELATED_OBJECT_OK",
+ """Callables can use SQL to load related objects as well
+ as scalar value attributes.
+ """,
+ canonical=4,
+)
+
+INIT_OK = util.symbol(
+ "INIT_OK",
+ """Attributes should be initialized with a blank
+ value (None or an empty collection) upon get, if no other
+ value can be obtained.
+ """,
+ canonical=8,
+)
+
+NON_PERSISTENT_OK = util.symbol(
+ "NON_PERSISTENT_OK",
+ """Callables can be emitted if the parent is not persistent.""",
+ canonical=16,
+)
+
+LOAD_AGAINST_COMMITTED = util.symbol(
+ "LOAD_AGAINST_COMMITTED",
+ """Callables should use committed values as primary/foreign keys during a
+ load.
+ """,
+ canonical=32,
+)
+
+NO_AUTOFLUSH = util.symbol(
+ "NO_AUTOFLUSH",
+ """Loader callables should disable autoflush.""",
+ canonical=64,
+)
+
+NO_RAISE = util.symbol(
+ "NO_RAISE",
+ """Loader callables should not raise any assertions""",
+ canonical=128,
+)
+
+DEFERRED_HISTORY_LOAD = util.symbol(
+ "DEFERRED_HISTORY_LOAD",
+ """indicates special load of the previous value of an attribute""",
+ canonical=256,
+)
+
+# pre-packaged sets of flags used as inputs
+PASSIVE_OFF = util.symbol(
+ "PASSIVE_OFF",
+ "Callables can be emitted in all cases.",
+ canonical=(
+ RELATED_OBJECT_OK | NON_PERSISTENT_OK | INIT_OK | CALLABLES_OK | SQL_OK
+ ),
+)
+PASSIVE_RETURN_NO_VALUE = util.symbol(
+ "PASSIVE_RETURN_NO_VALUE",
+ """PASSIVE_OFF ^ INIT_OK""",
+ canonical=PASSIVE_OFF ^ INIT_OK,
+)
+PASSIVE_NO_INITIALIZE = util.symbol(
+ "PASSIVE_NO_INITIALIZE",
+ "PASSIVE_RETURN_NO_VALUE ^ CALLABLES_OK",
+ canonical=PASSIVE_RETURN_NO_VALUE ^ CALLABLES_OK,
+)
+PASSIVE_NO_FETCH = util.symbol(
+ "PASSIVE_NO_FETCH", "PASSIVE_OFF ^ SQL_OK", canonical=PASSIVE_OFF ^ SQL_OK
+)
+PASSIVE_NO_FETCH_RELATED = util.symbol(
+ "PASSIVE_NO_FETCH_RELATED",
+ "PASSIVE_OFF ^ RELATED_OBJECT_OK",
+ canonical=PASSIVE_OFF ^ RELATED_OBJECT_OK,
+)
+PASSIVE_ONLY_PERSISTENT = util.symbol(
+ "PASSIVE_ONLY_PERSISTENT",
+ "PASSIVE_OFF ^ NON_PERSISTENT_OK",
+ canonical=PASSIVE_OFF ^ NON_PERSISTENT_OK,
+)
+
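+# For example, a load that must not emit SQL is invoked with
+# PASSIVE_NO_FETCH, i.e. all PASSIVE_OFF flags minus SQL_OK; consumers
+# test individual flags with bitwise "and", e.g. ``if passive & SQL_OK``.
+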
+DEFAULT_MANAGER_ATTR = "_sa_class_manager"
+DEFAULT_STATE_ATTR = "_sa_instance_state"
+
+EXT_CONTINUE = util.symbol("EXT_CONTINUE")
+EXT_STOP = util.symbol("EXT_STOP")
+EXT_SKIP = util.symbol("EXT_SKIP")
+
+ONETOMANY = util.symbol(
+ "ONETOMANY",
+ """Indicates the one-to-many direction for a :func:`_orm.relationship`.
+
+ This symbol is typically used by the internals but may be exposed within
+ certain API features.
+
+ """,
+)
+
+MANYTOONE = util.symbol(
+ "MANYTOONE",
+ """Indicates the many-to-one direction for a :func:`_orm.relationship`.
+
+ This symbol is typically used by the internals but may be exposed within
+ certain API features.
+
+ """,
+)
+
+MANYTOMANY = util.symbol(
+ "MANYTOMANY",
+ """Indicates the many-to-many direction for a :func:`_orm.relationship`.
+
+ This symbol is typically used by the internals but may be exposed within
+ certain API features.
+
+ """,
+)
+
+NOT_EXTENSION = util.symbol(
+ "NOT_EXTENSION",
+ """Symbol indicating an :class:`InspectionAttr` that's
+ not part of sqlalchemy.ext.
+
+ Is assigned to the :attr:`.InspectionAttr.extension_type`
+ attribute.
+
+ """,
+)
+
+_never_set = frozenset([NEVER_SET])
+
+_none_set = frozenset([None, NEVER_SET, PASSIVE_NO_RESULT])
+
+_SET_DEFERRED_EXPIRED = util.symbol("SET_DEFERRED_EXPIRED")
+
+_DEFER_FOR_STATE = util.symbol("DEFER_FOR_STATE")
+
+_RAISE_FOR_STATE = util.symbol("RAISE_FOR_STATE")
+
+
+def _assertions(*assertions):
+ @util.decorator
+ def generate(fn, *args, **kw):
+ self = args[0]
+ for assertion in assertions:
+ assertion(self, fn.__name__)
+ fn(self, *args[1:], **kw)
+
+ return generate
+
+
+# these can be replaced by sqlalchemy.ext.instrumentation
+# if augmented class instrumentation is enabled.
+def manager_of_class(cls):
+ return cls.__dict__.get(DEFAULT_MANAGER_ATTR, None)
+
+
+instance_state = operator.attrgetter(DEFAULT_STATE_ATTR)
+
+instance_dict = operator.attrgetter("__dict__")
+
+
+def instance_str(instance):
+ """Return a string describing an instance."""
+
+ return state_str(instance_state(instance))
+
+
+def state_str(state):
+ """Return a string describing an instance via its InstanceState."""
+
+ if state is None:
+ return "None"
+ else:
+ return "<%s at 0x%x>" % (state.class_.__name__, id(state.obj()))
+
+
+def state_class_str(state):
+ """Return a string describing an instance's class via its
+ InstanceState.
+ """
+
+ if state is None:
+ return "None"
+ else:
+ return "<%s>" % (state.class_.__name__,)
+
+
+def attribute_str(instance, attribute):
+ return instance_str(instance) + "." + attribute
+
+
+def state_attribute_str(state, attribute):
+ return state_str(state) + "." + attribute
+
+
+def object_mapper(instance):
+ """Given an object, return the primary Mapper associated with the object
+ instance.
+
+ Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError`
+ if no mapping is configured.
+
+ This function is available via the inspection system as::
+
+ inspect(instance).mapper
+
+ Using the inspection system will raise
+ :class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is
+ not part of a mapping.
+
+ """
+ return object_state(instance).mapper
+
+
+def object_state(instance):
+ """Given an object, return the :class:`.InstanceState`
+ associated with the object.
+
+ Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError`
+ if no mapping is configured.
+
+ Equivalent functionality is available via the :func:`_sa.inspect`
+ function as::
+
+ inspect(instance)
+
+ Using the inspection system will raise
+ :class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is
+ not part of a mapping.
+
+ """
+ state = _inspect_mapped_object(instance)
+ if state is None:
+ raise exc.UnmappedInstanceError(instance)
+ else:
+ return state
+
+
+@inspection._inspects(object)
+def _inspect_mapped_object(instance):
+ try:
+ return instance_state(instance)
+ except (exc.UnmappedClassError,) + exc.NO_STATE:
+ return None
+
+
+def _class_to_mapper(class_or_mapper):
+ insp = inspection.inspect(class_or_mapper, False)
+ if insp is not None:
+ return insp.mapper
+ else:
+ raise exc.UnmappedClassError(class_or_mapper)
+
+
+def _mapper_or_none(entity):
+ """Return the :class:`_orm.Mapper` for the given class or None if the
+ class is not mapped.
+ """
+
+ insp = inspection.inspect(entity, False)
+ if insp is not None:
+ return insp.mapper
+ else:
+ return None
+
+
+def _is_mapped_class(entity):
+ """Return True if the given object is a mapped class,
+ :class:`_orm.Mapper`, or :class:`.AliasedClass`.
+ """
+
+ insp = inspection.inspect(entity, False)
+ return (
+ insp is not None
+ and not insp.is_clause_element
+ and (insp.is_mapper or insp.is_aliased_class)
+ )
+
+
+def _orm_columns(entity):
+ insp = inspection.inspect(entity, False)
+ if hasattr(insp, "selectable") and hasattr(insp.selectable, "c"):
+ return [c for c in insp.selectable.c]
+ else:
+ return [entity]
+
+
+def _is_aliased_class(entity):
+ insp = inspection.inspect(entity, False)
+ return insp is not None and getattr(insp, "is_aliased_class", False)
+
+
+def _entity_descriptor(entity, key):
+ """Return a class attribute given an entity and string name.
+
+ May return :class:`.InstrumentedAttribute` or user-defined
+ attribute.
+
+ """
+ insp = inspection.inspect(entity)
+ if insp.is_selectable:
+ description = entity
+ entity = insp.c
+ elif insp.is_aliased_class:
+ entity = insp.entity
+ description = entity
+ elif hasattr(insp, "mapper"):
+ description = entity = insp.mapper.class_
+ else:
+ description = entity
+
+ try:
+ return getattr(entity, key)
+ except AttributeError as err:
+ util.raise_(
+ sa_exc.InvalidRequestError(
+ "Entity '%s' has no property '%s'" % (description, key)
+ ),
+ replace_context=err,
+ )
+
+
+_state_mapper = util.dottedgetter("manager.mapper")
+
+
+@inspection._inspects(type)
+def _inspect_mapped_class(class_, configure=False):
+ try:
+ class_manager = manager_of_class(class_)
+ if not class_manager.is_mapped:
+ return None
+ mapper = class_manager.mapper
+ except exc.NO_STATE:
+ return None
+ else:
+ if configure:
+ mapper._check_configure()
+ return mapper
+
+
+def class_mapper(class_, configure=True):
+ """Given a class, return the primary :class:`_orm.Mapper` associated
+ with the key.
+
+ Raises :exc:`.UnmappedClassError` if no mapping is configured
+ on the given class, or :exc:`.ArgumentError` if a non-class
+ object is passed.
+
+ Equivalent functionality is available via the :func:`_sa.inspect`
+ function as::
+
+ inspect(some_mapped_class)
+
+ Using the inspection system will raise
+ :class:`sqlalchemy.exc.NoInspectionAvailable` if the class is not mapped.
+
+ """
+ mapper = _inspect_mapped_class(class_, configure=configure)
+ if mapper is None:
+ if not isinstance(class_, type):
+ raise sa_exc.ArgumentError(
+ "Class object expected, got '%r'." % (class_,)
+ )
+ raise exc.UnmappedClassError(class_)
+ else:
+ return mapper
+
+
+class InspectionAttr(object):
+ """A base class applied to all ORM objects that can be returned
+ by the :func:`_sa.inspect` function.
+
+ The attributes defined here allow the usage of simple boolean
+ checks to test basic facts about the object returned.
+
+ While the boolean checks here are basically the same as using
+ the Python isinstance() function, the flags here can be used without
+ the need to import all of these classes, and also such that
+ the SQLAlchemy class system can change while leaving the flags
+ here intact for forwards-compatibility.
+
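+    For example (``some_mapped_entity`` here is illustrative)::
+
+        from sqlalchemy import inspect
+
+        insp = inspect(some_mapped_entity)
+        if insp.is_mapper:
+            print(insp.class_)
+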
+ """
+
+ __slots__ = ()
+
+ is_selectable = False
+ """Return True if this object is an instance of
+ :class:`_expression.Selectable`."""
+
+ is_aliased_class = False
+ """True if this object is an instance of :class:`.AliasedClass`."""
+
+ is_instance = False
+ """True if this object is an instance of :class:`.InstanceState`."""
+
+ is_mapper = False
+ """True if this object is an instance of :class:`_orm.Mapper`."""
+
+ is_bundle = False
+ """True if this object is an instance of :class:`.Bundle`."""
+
+ is_property = False
+ """True if this object is an instance of :class:`.MapperProperty`."""
+
+ is_attribute = False
+ """True if this object is a Python :term:`descriptor`.
+
+ This can refer to one of many types. Usually a
+ :class:`.QueryableAttribute` which handles attributes events on behalf
+ of a :class:`.MapperProperty`. But can also be an extension type
+ such as :class:`.AssociationProxy` or :class:`.hybrid_property`.
+ The :attr:`.InspectionAttr.extension_type` will refer to a constant
+ identifying the specific subtype.
+
+ .. seealso::
+
+ :attr:`_orm.Mapper.all_orm_descriptors`
+
+ """
+
+ _is_internal_proxy = False
+ """True if this object is an internal proxy object.
+
+ .. versionadded:: 1.2.12
+
+ """
+
+ is_clause_element = False
+ """True if this object is an instance of
+ :class:`_expression.ClauseElement`."""
+
+ extension_type = NOT_EXTENSION
+ """The extension type, if any.
+ Defaults to :data:`.interfaces.NOT_EXTENSION`
+
+ .. seealso::
+
+ :data:`.HYBRID_METHOD`
+
+ :data:`.HYBRID_PROPERTY`
+
+ :data:`.ASSOCIATION_PROXY`
+
+ """
+
+
+class InspectionAttrInfo(InspectionAttr):
+ """Adds the ``.info`` attribute to :class:`.InspectionAttr`.
+
+ The rationale for :class:`.InspectionAttr` vs. :class:`.InspectionAttrInfo`
+ is that the former is compatible as a mixin for classes that specify
+ ``__slots__``; this is essentially an implementation artifact.
+
+ """
+
+ @util.memoized_property
+ def info(self):
+ """Info dictionary associated with the object, allowing user-defined
+ data to be associated with this :class:`.InspectionAttr`.
+
+ The dictionary is generated when first accessed. Alternatively,
+ it can be specified as a constructor argument to the
+ :func:`.column_property`, :func:`_orm.relationship`, or
+ :func:`.composite`
+ functions.
+
+ .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
+ available on extension types via the
+ :attr:`.InspectionAttrInfo.info` attribute, so that it can apply
+ to a wider variety of ORM and extension constructs.
+
+ .. seealso::
+
+ :attr:`.QueryableAttribute.info`
+
+ :attr:`.SchemaItem.info`
+
+ """
+ return {}
+
+
+class _MappedAttribute(object):
+ """Mixin for attributes which should be replaced by mapper-assigned
+ attributes.
+
+ """
+
+ __slots__ = ()
diff --git a/lib/sqlalchemy/orm/clsregistry.py b/lib/sqlalchemy/orm/clsregistry.py
new file mode 100644
index 0000000..2c21498
--- /dev/null
+++ b/lib/sqlalchemy/orm/clsregistry.py
@@ -0,0 +1,441 @@
+# orm/clsregistry.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+"""Routines to handle the string class registry used by declarative.
+
+This system allows specification of classes and expressions used in
+:func:`_orm.relationship` using strings.
+
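+For example, a relationship target and join condition may be given as
+strings that are resolved against this registry when mappers are
+configured::
+
+    addresses = relationship(
+        "Address",
+        primaryjoin="User.id == Address.user_id",
+    )
+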
+"""
+import weakref
+
+from . import attributes
+from . import interfaces
+from .descriptor_props import SynonymProperty
+from .properties import ColumnProperty
+from .util import class_mapper
+from .. import exc
+from .. import inspection
+from .. import util
+from ..sql.schema import _get_table_key
+
+# strong references to registries which we place in
+# the _decl_class_registry, which is usually weak referencing.
+# the internal registries here link to classes with weakrefs and remove
+# themselves when all references to contained classes are removed.
+_registries = set()
+
+
+def add_class(classname, cls, decl_class_registry):
+ """Add a class to the _decl_class_registry associated with the
+ given declarative class.
+
+ """
+ if classname in decl_class_registry:
+ # class already exists.
+ existing = decl_class_registry[classname]
+ if not isinstance(existing, _MultipleClassMarker):
+ existing = decl_class_registry[classname] = _MultipleClassMarker(
+ [cls, existing]
+ )
+ else:
+ decl_class_registry[classname] = cls
+
+ try:
+ root_module = decl_class_registry["_sa_module_registry"]
+ except KeyError:
+ decl_class_registry[
+ "_sa_module_registry"
+ ] = root_module = _ModuleMarker("_sa_module_registry", None)
+
+ tokens = cls.__module__.split(".")
+
+ # build up a tree like this:
+ # modulename: myapp.snacks.nuts
+ #
+ # myapp->snack->nuts->(classes)
+ # snack->nuts->(classes)
+ # nuts->(classes)
+ #
+ # this allows partial token paths to be used.
+ while tokens:
+ token = tokens.pop(0)
+ module = root_module.get_module(token)
+ for token in tokens:
+ module = module.get_module(token)
+ module.add_class(classname, cls)
+
+
+def remove_class(classname, cls, decl_class_registry):
+ if classname in decl_class_registry:
+ existing = decl_class_registry[classname]
+ if isinstance(existing, _MultipleClassMarker):
+ existing.remove_item(cls)
+ else:
+ del decl_class_registry[classname]
+
+ try:
+ root_module = decl_class_registry["_sa_module_registry"]
+ except KeyError:
+ return
+
+ tokens = cls.__module__.split(".")
+
+ while tokens:
+ token = tokens.pop(0)
+ module = root_module.get_module(token)
+ for token in tokens:
+ module = module.get_module(token)
+ module.remove_class(classname, cls)
+
+
+def _key_is_empty(key, decl_class_registry, test):
+ """test if a key is empty of a certain object.
+
+ used for unit tests against the registry to see if garbage collection
+ is working.
+
+ "test" is a callable that will be passed an object should return True
+ if the given object is the one we were looking for.
+
+ We can't pass the actual object itself b.c. this is for testing garbage
+ collection; the caller will have to have removed references to the
+ object itself.
+
+ """
+ if key not in decl_class_registry:
+ return True
+
+ thing = decl_class_registry[key]
+ if isinstance(thing, _MultipleClassMarker):
+ for sub_thing in thing.contents:
+ if test(sub_thing):
+ return False
+ else:
+ return not test(thing)
+
+
+class _MultipleClassMarker(object):
+ """refers to multiple classes of the same name
+ within _decl_class_registry.
+
+ """
+
+ __slots__ = "on_remove", "contents", "__weakref__"
+
+ def __init__(self, classes, on_remove=None):
+ self.on_remove = on_remove
+ self.contents = set(
+ [weakref.ref(item, self._remove_item) for item in classes]
+ )
+ _registries.add(self)
+
+ def remove_item(self, cls):
+ self._remove_item(weakref.ref(cls))
+
+ def __iter__(self):
+ return (ref() for ref in self.contents)
+
+ def attempt_get(self, path, key):
+ if len(self.contents) > 1:
+ raise exc.InvalidRequestError(
+ 'Multiple classes found for path "%s" '
+ "in the registry of this declarative "
+ "base. Please use a fully module-qualified path."
+ % (".".join(path + [key]))
+ )
+ else:
+ ref = list(self.contents)[0]
+ cls = ref()
+ if cls is None:
+ raise NameError(key)
+ return cls
+
+ def _remove_item(self, ref):
+ self.contents.discard(ref)
+ if not self.contents:
+ _registries.discard(self)
+ if self.on_remove:
+ self.on_remove()
+
+ def add_item(self, item):
+ # protect against class registration race condition against
+ # asynchronous garbage collection calling _remove_item,
+ # [ticket:3208]
+ modules = set(
+ [
+ cls.__module__
+ for cls in [ref() for ref in self.contents]
+ if cls is not None
+ ]
+ )
+ if item.__module__ in modules:
+ util.warn(
+ "This declarative base already contains a class with the "
+ "same class name and module name as %s.%s, and will "
+ "be replaced in the string-lookup table."
+ % (item.__module__, item.__name__)
+ )
+ self.contents.add(weakref.ref(item, self._remove_item))
+
+
+class _ModuleMarker(object):
+ """Refers to a module name within
+ _decl_class_registry.
+
+ """
+
+ __slots__ = "parent", "name", "contents", "mod_ns", "path", "__weakref__"
+
+ def __init__(self, name, parent):
+ self.parent = parent
+ self.name = name
+ self.contents = {}
+ self.mod_ns = _ModNS(self)
+ if self.parent:
+ self.path = self.parent.path + [self.name]
+ else:
+ self.path = []
+ _registries.add(self)
+
+ def __contains__(self, name):
+ return name in self.contents
+
+ def __getitem__(self, name):
+ return self.contents[name]
+
+ def _remove_item(self, name):
+ self.contents.pop(name, None)
+ if not self.contents and self.parent is not None:
+ self.parent._remove_item(self.name)
+ _registries.discard(self)
+
+ def resolve_attr(self, key):
+ return getattr(self.mod_ns, key)
+
+ def get_module(self, name):
+ if name not in self.contents:
+ marker = _ModuleMarker(name, self)
+ self.contents[name] = marker
+ else:
+ marker = self.contents[name]
+ return marker
+
+ def add_class(self, name, cls):
+ if name in self.contents:
+ existing = self.contents[name]
+ existing.add_item(cls)
+ else:
+ existing = self.contents[name] = _MultipleClassMarker(
+ [cls], on_remove=lambda: self._remove_item(name)
+ )
+
+ def remove_class(self, name, cls):
+ if name in self.contents:
+ existing = self.contents[name]
+ existing.remove_item(cls)
+
+
+class _ModNS(object):
+ __slots__ = ("__parent",)
+
+ def __init__(self, parent):
+ self.__parent = parent
+
+ def __getattr__(self, key):
+ try:
+ value = self.__parent.contents[key]
+ except KeyError:
+ pass
+ else:
+ if value is not None:
+ if isinstance(value, _ModuleMarker):
+ return value.mod_ns
+ else:
+ assert isinstance(value, _MultipleClassMarker)
+ return value.attempt_get(self.__parent.path, key)
+ raise NameError(
+ "Module %r has no mapped classes "
+ "registered under the name %r" % (self.__parent.name, key)
+ )
+
+
+class _GetColumns(object):
+ __slots__ = ("cls",)
+
+ def __init__(self, cls):
+ self.cls = cls
+
+ def __getattr__(self, key):
+ mp = class_mapper(self.cls, configure=False)
+ if mp:
+ if key not in mp.all_orm_descriptors:
+ raise AttributeError(
+ "Class %r does not have a mapped column named %r"
+ % (self.cls, key)
+ )
+
+ desc = mp.all_orm_descriptors[key]
+ if desc.extension_type is interfaces.NOT_EXTENSION:
+ prop = desc.property
+ if isinstance(prop, SynonymProperty):
+ key = prop.name
+ elif not isinstance(prop, ColumnProperty):
+ raise exc.InvalidRequestError(
+ "Property %r is not an instance of"
+ " ColumnProperty (i.e. does not correspond"
+ " directly to a Column)." % key
+ )
+ return getattr(self.cls, key)
+
+
+inspection._inspects(_GetColumns)(
+ lambda target: inspection.inspect(target.cls)
+)
+
+
+class _GetTable(object):
+ __slots__ = "key", "metadata"
+
+ def __init__(self, key, metadata):
+ self.key = key
+ self.metadata = metadata
+
+ def __getattr__(self, key):
+ return self.metadata.tables[_get_table_key(key, self.key)]
+
+
+def _determine_container(key, value):
+ if isinstance(value, _MultipleClassMarker):
+ value = value.attempt_get([], key)
+ return _GetColumns(value)
+
+
+class _class_resolver(object):
+ __slots__ = (
+ "cls",
+ "prop",
+ "arg",
+ "fallback",
+ "_dict",
+ "_resolvers",
+ "favor_tables",
+ )
+
+ def __init__(self, cls, prop, fallback, arg, favor_tables=False):
+ self.cls = cls
+ self.prop = prop
+ self.arg = arg
+ self.fallback = fallback
+ self._dict = util.PopulateDict(self._access_cls)
+ self._resolvers = ()
+ self.favor_tables = favor_tables
+
+ def _access_cls(self, key):
+ cls = self.cls
+
+ manager = attributes.manager_of_class(cls)
+ decl_base = manager.registry
+ decl_class_registry = decl_base._class_registry
+ metadata = decl_base.metadata
+
+ if self.favor_tables:
+ if key in metadata.tables:
+ return metadata.tables[key]
+ elif key in metadata._schemas:
+ return _GetTable(key, cls.metadata)
+
+ if key in decl_class_registry:
+ return _determine_container(key, decl_class_registry[key])
+
+ if not self.favor_tables:
+ if key in metadata.tables:
+ return metadata.tables[key]
+ elif key in metadata._schemas:
+ return _GetTable(key, cls.metadata)
+
+ if (
+ "_sa_module_registry" in decl_class_registry
+ and key in decl_class_registry["_sa_module_registry"]
+ ):
+ registry = decl_class_registry["_sa_module_registry"]
+ return registry.resolve_attr(key)
+ elif self._resolvers:
+ for resolv in self._resolvers:
+ value = resolv(key)
+ if value is not None:
+ return value
+
+ return self.fallback[key]
+
+ def _raise_for_name(self, name, err):
+ util.raise_(
+ exc.InvalidRequestError(
+ "When initializing mapper %s, expression %r failed to "
+ "locate a name (%r). If this is a class name, consider "
+ "adding this relationship() to the %r class after "
+ "both dependent classes have been defined."
+ % (self.prop.parent, self.arg, name, self.cls)
+ ),
+ from_=err,
+ )
+
+ def _resolve_name(self):
+ name = self.arg
+ d = self._dict
+ rval = None
+ try:
+ for token in name.split("."):
+ if rval is None:
+ rval = d[token]
+ else:
+ rval = getattr(rval, token)
+ except KeyError as err:
+ self._raise_for_name(name, err)
+ except NameError as n:
+ self._raise_for_name(n.args[0], n)
+ else:
+ if isinstance(rval, _GetColumns):
+ return rval.cls
+ else:
+ return rval
+
+ def __call__(self):
+ try:
+ x = eval(self.arg, globals(), self._dict)
+
+ if isinstance(x, _GetColumns):
+ return x.cls
+ else:
+ return x
+ except NameError as n:
+ self._raise_for_name(n.args[0], n)
+
+
+_fallback_dict = None
+
+
+def _resolver(cls, prop):
+
+ global _fallback_dict
+
+ if _fallback_dict is None:
+ import sqlalchemy
+ from sqlalchemy.orm import foreign, remote
+
+ _fallback_dict = util.immutabledict(sqlalchemy.__dict__).union(
+ {"foreign": foreign, "remote": remote}
+ )
+
+ def resolve_arg(arg, favor_tables=False):
+ return _class_resolver(
+ cls, prop, _fallback_dict, arg, favor_tables=favor_tables
+ )
+
+ def resolve_name(arg):
+ return _class_resolver(cls, prop, _fallback_dict, arg)._resolve_name
+
+ return resolve_name, resolve_arg
diff --git a/lib/sqlalchemy/orm/collections.py b/lib/sqlalchemy/orm/collections.py
new file mode 100644
index 0000000..a189f02
--- /dev/null
+++ b/lib/sqlalchemy/orm/collections.py
@@ -0,0 +1,1706 @@
+# orm/collections.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""Support for collections of mapped entities.
+
+The collections package supplies the machinery used to inform the ORM of
+collection membership changes. An instrumentation via decoration approach is
+used, allowing arbitrary types (including built-ins) to be used as entity
+collections without requiring inheritance from a base class.
+
+Instrumentation decoration relays membership change events to the
+:class:`.CollectionAttributeImpl` that is currently managing the collection.
+The decorators observe function call arguments and return values, tracking
+entities entering or leaving the collection. Two decorator approaches are
+provided. One is a bundle of generic decorators that map function arguments
+and return values to events::
+
+ from sqlalchemy.orm.collections import collection
+ class MyClass(object):
+ # ...
+
+ @collection.adds(1)
+ def store(self, item):
+ self.data.append(item)
+
+ @collection.removes_return()
+ def pop(self):
+ return self.data.pop()
+
+
+The second approach is a bundle of targeted decorators that wrap appropriate
+append and remove notifiers around the mutation methods present in the
+standard Python ``list``, ``set`` and ``dict`` interfaces. These could be
+specified in terms of generic decorator recipes, but are instead hand-tooled
+for increased efficiency. The targeted decorators occasionally implement
+adapter-like behavior, such as mapping bulk-set methods (``extend``,
+``update``, ``__setslice__``, etc.) into the series of atomic mutation events
+that the ORM requires.
+
+The targeted decorators are used internally for automatic instrumentation of
+entity collection classes. Every collection class goes through a
+transformation process roughly like so:
+
+1. If the class is a built-in, substitute a trivial sub-class
+2. Is this class already instrumented?
+3. Add in generic decorators
+4. Sniff out the collection interface through duck-typing
+5. Add targeted decoration to any undecorated interface method
+
+This process modifies the class at runtime, decorating methods and adding some
+bookkeeping properties. This isn't possible (or desirable) for built-in
+classes like ``list``, so trivial sub-classes are substituted to hold
+decoration::
+
+ class InstrumentedList(list):
+ pass
+
+Collection classes can be specified in ``relationship(collection_class=)`` as
+types or a function that returns an instance. Collection classes are
+inspected and instrumented during the mapper compilation phase. The
+collection_class callable will be executed once to produce a specimen
+instance, and the type of that specimen will be instrumented. Functions that
+return built-in types like ``lists`` will be adapted to produce instrumented
+instances.
+
+When extending a known type like ``list``, additional decoration is
+generally not needed. Odds are, the extension method will delegate to a
+method that's already instrumented. For example::
+
+ class QueueIsh(list):
+ def push(self, item):
+ self.append(item)
+ def shift(self):
+ return self.pop(0)
+
+There's no need to decorate these methods. ``append`` and ``pop`` are already
+instrumented as part of the ``list`` interface. Decorating them would fire
+duplicate events, which should be avoided.
+
+The targeted decoration tries not to rely on other methods in the underlying
+collection class, but some are unavoidable. Many depend on 'read' methods
+being present to properly instrument a 'write', for example, ``__setitem__``
+needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also
+be reimplemented in terms of atomic appends and removes, so the ``extend``
+decoration will actually perform many ``append`` operations and not call the
+underlying method at all.
+
+Tight control over bulk operation and the firing of events is also possible by
+implementing the instrumentation internally in your methods. The basic
+instrumentation package works under the general assumption that collection
+mutation will not raise unusual exceptions. If you want to closely
+orchestrate append and remove events with exception management, internal
+instrumentation may be the answer. Within your method,
+``collection_adapter(self)`` will retrieve an object that you can use for
+explicit control over triggering append and remove events.
+
+The owning object and :class:`.CollectionAttributeImpl` are also reachable
+through the adapter, allowing for some very sophisticated behavior.
+
+"""
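+For example, a rough sketch of an internally instrumented bulk method using
+the adapter's ``fire_append_event`` hook (``MyList`` and ``push_all`` are
+illustrative names)::
+
+    from sqlalchemy.orm.collections import collection, collection_adapter
+
+    class MyList(list):
+        @collection.internally_instrumented
+        def push_all(self, items, _sa_initiator=None):
+            adapter = collection_adapter(self)
+            for item in items:
+                if adapter:
+                    # let the ORM see the append; the returned value
+                    # is the item to actually store
+                    item = adapter.fire_append_event(item, _sa_initiator)
+                list.append(self, item)
+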
+
+import operator
+import weakref
+
+from sqlalchemy.util.compat import inspect_getfullargspec
+from . import base
+from .. import exc as sa_exc
+from .. import util
+from ..sql import coercions
+from ..sql import expression
+from ..sql import roles
+
+__all__ = [
+ "collection",
+ "collection_adapter",
+ "mapped_collection",
+ "column_mapped_collection",
+ "attribute_mapped_collection",
+]
+
+__instrumentation_mutex = util.threading.Lock()
+
+
+class _PlainColumnGetter(object):
+ """Plain column getter, stores collection of Column objects
+ directly.
+
+ Serializes to a :class:`._SerializableColumnGetterV2`
+ which has more expensive __call__() performance
+ and some rare caveats.
+
+ """
+
+ def __init__(self, cols):
+ self.cols = cols
+ self.composite = len(cols) > 1
+
+ def __reduce__(self):
+ return _SerializableColumnGetterV2._reduce_from_cols(self.cols)
+
+ def _cols(self, mapper):
+ return self.cols
+
+ def __call__(self, value):
+ state = base.instance_state(value)
+ m = base._state_mapper(state)
+
+ key = [
+ m._get_state_attr_by_column(state, state.dict, col)
+ for col in self._cols(m)
+ ]
+
+ if self.composite:
+ return tuple(key)
+ else:
+ return key[0]
+
+
+class _SerializableColumnGetter(object):
+ """Column-based getter used in version 0.7.6 only.
+
+ Remains here for pickle compatibility with 0.7.6.
+
+ """
+
+ def __init__(self, colkeys):
+ self.colkeys = colkeys
+ self.composite = len(colkeys) > 1
+
+ def __reduce__(self):
+ return _SerializableColumnGetter, (self.colkeys,)
+
+ def __call__(self, value):
+ state = base.instance_state(value)
+ m = base._state_mapper(state)
+ key = [
+ m._get_state_attr_by_column(
+ state, state.dict, m.mapped_table.columns[k]
+ )
+ for k in self.colkeys
+ ]
+ if self.composite:
+ return tuple(key)
+ else:
+ return key[0]
+
+
+class _SerializableColumnGetterV2(_PlainColumnGetter):
+ """Updated serializable getter which deals with
+ multi-table mapped classes.
+
+ Two extremely unusual cases are not supported.
+ Mappings which have tables across multiple metadata
+ objects, or which are mapped to non-Table selectables
+ linked across inheriting mappers may fail to function
+ here.
+
+ """
+
+ def __init__(self, colkeys):
+ self.colkeys = colkeys
+ self.composite = len(colkeys) > 1
+
+ def __reduce__(self):
+ return self.__class__, (self.colkeys,)
+
+ @classmethod
+ def _reduce_from_cols(cls, cols):
+ def _table_key(c):
+ if not isinstance(c.table, expression.TableClause):
+ return None
+ else:
+ return c.table.key
+
+ colkeys = [(c.key, _table_key(c)) for c in cols]
+ return _SerializableColumnGetterV2, (colkeys,)
+
+ def _cols(self, mapper):
+ cols = []
+ metadata = getattr(mapper.local_table, "metadata", None)
+ for (ckey, tkey) in self.colkeys:
+ if tkey is None or metadata is None or tkey not in metadata:
+ cols.append(mapper.local_table.c[ckey])
+ else:
+ cols.append(metadata.tables[tkey].c[ckey])
+ return cols
+
+
+def column_mapped_collection(mapping_spec):
+ """A dictionary-based collection type with column-based keying.
+
+ Returns a :class:`.MappedCollection` factory with a keying function
+ generated from mapping_spec, which may be a Column or a sequence
+ of Columns.
+
+ The key value must be immutable for the lifetime of the object. You
+ can not, for example, map on foreign key values if those key values will
+ change during the session, i.e. from None to a database-assigned integer
+ after a session flush.
+
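+    A usage sketch, keyed on a hypothetical ``note_table.c.keyword``
+    column::
+
+        notes = relationship(
+            "Note",
+            collection_class=column_mapped_collection(note_table.c.keyword),
+        )
+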
+ """
+ cols = [
+ coercions.expect(roles.ColumnArgumentRole, q, argname="mapping_spec")
+ for q in util.to_list(mapping_spec)
+ ]
+ keyfunc = _PlainColumnGetter(cols)
+ return lambda: MappedCollection(keyfunc)
+
+
+class _SerializableAttrGetter(object):
+ def __init__(self, name):
+ self.name = name
+ self.getter = operator.attrgetter(name)
+
+ def __call__(self, target):
+ return self.getter(target)
+
+ def __reduce__(self):
+ return _SerializableAttrGetter, (self.name,)
+
+
+def attribute_mapped_collection(attr_name):
+ """A dictionary-based collection type with attribute-based keying.
+
+ Returns a :class:`.MappedCollection` factory with a keying based on the
+ 'attr_name' attribute of entities in the collection, where ``attr_name``
+ is the string name of the attribute.
+
+ .. warning:: the key value must be assigned to its final value
+ **before** it is accessed by the attribute mapped collection.
+ Additionally, changes to the key attribute are **not tracked**
+ automatically, which means the key in the dictionary is not
+ automatically synchronized with the key value on the target object
+ itself. See the section :ref:`key_collections_mutations`
+ for an example.
+
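+    A usage sketch, assuming a hypothetical ``Note`` class keyed on its
+    ``keyword`` attribute::
+
+        notes = relationship(
+            "Note",
+            collection_class=attribute_mapped_collection("keyword"),
+        )
+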
+ """
+ getter = _SerializableAttrGetter(attr_name)
+ return lambda: MappedCollection(getter)
+
+
+def mapped_collection(keyfunc):
+ """A dictionary-based collection type with arbitrary keying.
+
+ Returns a :class:`.MappedCollection` factory with a keying function
+ generated from keyfunc, a callable that takes an entity and returns a
+ key value.
+
+ The key value must be immutable for the lifetime of the object. You
+ can not, for example, map on foreign key values if those key values will
+ change during the session, i.e. from None to a database-assigned integer
+ after a session flush.
+
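+    For example, keying on a computed value via a lambda (the ``Note``
+    class here is illustrative)::
+
+        notes = relationship(
+            "Note",
+            collection_class=mapped_collection(lambda note: note.text[:10]),
+        )
+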
+ """
+ return lambda: MappedCollection(keyfunc)
+
+
+class collection(object):
+ """Decorators for entity collection classes.
+
+ The decorators fall into two groups: annotations and interception recipes.
+
+ The annotating decorators (appender, remover, iterator, converter,
+ internally_instrumented) indicate the method's purpose and take no
+ arguments. They are not written with parens::
+
+ @collection.appender
+ def append(self, append): ...
+
+ The recipe decorators all require parens, even those that take no
+ arguments::
+
+ @collection.adds('entity')
+ def insert(self, position, entity): ...
+
+ @collection.removes_return()
+ def popitem(self): ...
+
+ """
+
+ # Bundled as a class solely for ease of use: packaging, doc strings,
+ # importability.
+
+ @staticmethod
+ def appender(fn):
+ """Tag the method as the collection appender.
+
+ The appender method is called with one positional argument: the value
+ to append. The method will be automatically decorated with 'adds(1)'
+ if not already decorated::
+
+ @collection.appender
+ def add(self, append): ...
+
+ # or, equivalently
+ @collection.appender
+ @collection.adds(1)
+ def add(self, append): ...
+
+ # for mapping type, an 'append' may kick out a previous value
+            # that occupies that slot. consider d['a'] = 'foo' - any previous
+ # value in d['a'] is discarded.
+ @collection.appender
+ @collection.replaces(1)
+ def add(self, entity):
+ key = some_key_func(entity)
+ previous = None
+ if key in self:
+ previous = self[key]
+ self[key] = entity
+ return previous
+
+ If the value to append is not allowed in the collection, you may
+ raise an exception. Something to remember is that the appender
+ will be called for each object mapped by a database query. If the
+ database contains rows that violate your collection semantics, you
+ will need to get creative to fix the problem, as access via the
+ collection will not work.
+
+ If the appender method is internally instrumented, you must also
+ receive the keyword argument '_sa_initiator' and ensure its
+ propagation to collection events.
+
+ """
+ fn._sa_instrument_role = "appender"
+ return fn
+
+ @staticmethod
+ def remover(fn):
+ """Tag the method as the collection remover.
+
+ The remover method is called with one positional argument: the value
+ to remove. The method will be automatically decorated with
+ :meth:`removes_return` if not already decorated::
+
+ @collection.remover
+ def zap(self, entity): ...
+
+ # or, equivalently
+ @collection.remover
+ @collection.removes_return()
+ def zap(self): ...
+
+ If the value to remove is not present in the collection, you may
+ raise an exception or return None to ignore the error.
+
+ If the remover method is internally instrumented, you must also
+ receive the keyword argument '_sa_initiator' and ensure its
+ propagation to collection events.
+
+ """
+ fn._sa_instrument_role = "remover"
+ return fn
+
+ @staticmethod
+ def iterator(fn):
+ """Tag the method as the collection remover.
+
+ The iterator method is called with no arguments. It is expected to
+ return an iterator over all collection members::
+
+ @collection.iterator
+ def __iter__(self): ...
+
+ """
+ fn._sa_instrument_role = "iterator"
+ return fn
+
+ @staticmethod
+ def internally_instrumented(fn):
+ """Tag the method as instrumented.
+
+ This tag will prevent any decoration from being applied to the
+ method. Use this if you are orchestrating your own calls to
+ :func:`.collection_adapter` in one of the basic SQLAlchemy
+ interface methods, or to prevent an automatic ABC method
+ decoration from wrapping your implementation::
+
+ # normally an 'extend' method on a list-like class would be
+ # automatically intercepted and re-implemented in terms of
+ # SQLAlchemy events and append(). your implementation will
+ # never be called, unless:
+ @collection.internally_instrumented
+ def extend(self, items): ...
+
+ """
+ fn._sa_instrumented = True
+ return fn
+
+ @staticmethod
+ @util.deprecated(
+ "1.3",
+ "The :meth:`.collection.converter` handler is deprecated and will "
+ "be removed in a future release. Please refer to the "
+ ":class:`.AttributeEvents.bulk_replace` listener interface in "
+ "conjunction with the :func:`.event.listen` function.",
+ )
+ def converter(fn):
+ """Tag the method as the collection converter.
+
+ This optional method will be called when a collection is being
+ replaced entirely, as in::
+
+ myobj.acollection = [newvalue1, newvalue2]
+
+ The converter method will receive the object being assigned and should
+ return an iterable of values suitable for use by the ``appender``
+ method. A converter must not assign values or mutate the collection;
+ its sole job is to adapt the value the user provides into an iterable
+ of values for the ORM's use.
+
+ The default converter implementation will use duck-typing to do the
+ conversion. A dict-like collection will be converted into an iterable
+ of dictionary values, and other types will simply be iterated::
+
+ @collection.converter
+ def convert(self, other): ...
+
+ If the duck-typing of the object does not match the type of this
+ collection, a TypeError is raised.
+
+ Supply an implementation of this method if you want to expand the
+ range of possible types that can be assigned in bulk or perform
+ validation on the values about to be assigned.
+
+ """
+ fn._sa_instrument_role = "converter"
+ return fn
+
+ @staticmethod
+ def adds(arg):
+ """Mark the method as adding an entity to the collection.
+
+ Adds "add to collection" handling to the method. The decorator
+ argument indicates which method argument holds the SQLAlchemy-relevant
+ value. Arguments can be specified positionally (i.e. integer) or by
+ name::
+
+ @collection.adds(1)
+ def push(self, item): ...
+
+ @collection.adds('entity')
+ def do_stuff(self, thing, entity=None): ...
+
+ """
+
+ def decorator(fn):
+ fn._sa_instrument_before = ("fire_append_event", arg)
+ return fn
+
+ return decorator
+
+ @staticmethod
+ def replaces(arg):
+ """Mark the method as replacing an entity in the collection.
+
+ Adds "add to collection" and "remove from collection" handling to
+ the method. The decorator argument indicates which method argument
+ holds the SQLAlchemy-relevant value to be added; the return value,
+ if any, will be considered the value to remove.
+
+ Arguments can be specified positionally (i.e. integer) or by name::
+
+ @collection.replaces(2)
+ def __setitem__(self, index, item): ...
+
+ """
+
+ def decorator(fn):
+ fn._sa_instrument_before = ("fire_append_event", arg)
+ fn._sa_instrument_after = "fire_remove_event"
+ return fn
+
+ return decorator
+
+ @staticmethod
+ def removes(arg):
+ """Mark the method as removing an entity in the collection.
+
+ Adds "remove from collection" handling to the method. The decorator
+ argument indicates which method argument holds the SQLAlchemy-relevant
+ value to be removed. Arguments can be specified positionally (i.e.
+ integer) or by name::
+
+ @collection.removes(1)
+ def zap(self, item): ...
+
+ For methods where the value to remove is not known at call-time, use
+ collection.removes_return.
+
+ """
+
+ def decorator(fn):
+ fn._sa_instrument_before = ("fire_remove_event", arg)
+ return fn
+
+ return decorator
+
+ @staticmethod
+ def removes_return():
+ """Mark the method as removing an entity in the collection.
+
+ Adds "remove from collection" handling to the method. The return
+ value of the method, if any, is considered the value to remove. The
+ method arguments are not inspected::
+
+ @collection.removes_return()
+ def pop(self): ...
+
+ For methods where the value to remove is known at call-time, use
+ collection.removes.
+
+ """
+
+ def decorator(fn):
+ fn._sa_instrument_after = "fire_remove_event"
+ return fn
+
+ return decorator
+
+
+collection_adapter = operator.attrgetter("_sa_adapter")
+"""Fetch the :class:`.CollectionAdapter` for a collection."""
+
+
+class CollectionAdapter(object):
+ """Bridges between the ORM and arbitrary Python collections.
+
+ Proxies base-level collection operations (append, remove, iterate)
+ to the underlying Python collection, and emits add/remove events for
+ entities entering or leaving the collection.
+
+ The ORM uses :class:`.CollectionAdapter` exclusively for interaction with
+ entity collections.
+
+ """
+
+ __slots__ = (
+ "attr",
+ "_key",
+ "_data",
+ "owner_state",
+ "_converter",
+ "invalidated",
+ "empty",
+ )
+
+ def __init__(self, attr, owner_state, data):
+ self.attr = attr
+ self._key = attr.key
+ self._data = weakref.ref(data)
+ self.owner_state = owner_state
+ data._sa_adapter = self
+ self._converter = data._sa_converter
+ self.invalidated = False
+ self.empty = False
+
+ def _warn_invalidated(self):
+ util.warn("This collection has been invalidated.")
+
+ @property
+ def data(self):
+ "The entity collection being adapted."
+ return self._data()
+
+ @property
+ def _referenced_by_owner(self):
+ """return True if the owner state still refers to this collection.
+
+ This will return False within a bulk replace operation,
+ where this collection is the one being replaced.
+
+ """
+ return self.owner_state.dict[self._key] is self._data()
+
+ def bulk_appender(self):
+ return self._data()._sa_appender
+
+ def append_with_event(self, item, initiator=None):
+ """Add an entity to the collection, firing mutation events."""
+
+ self._data()._sa_appender(item, _sa_initiator=initiator)
+
+ def _set_empty(self, user_data):
+ assert (
+ not self.empty
+ ), "This collection adapter is already in the 'empty' state"
+ self.empty = True
+ self.owner_state._empty_collections[self._key] = user_data
+
+ def _reset_empty(self):
+ assert (
+ self.empty
+ ), "This collection adapter is not in the 'empty' state"
+ self.empty = False
+ self.owner_state.dict[
+ self._key
+ ] = self.owner_state._empty_collections.pop(self._key)
+
+ def _refuse_empty(self):
+ raise sa_exc.InvalidRequestError(
+ "This is a special 'empty' collection which cannot accommodate "
+ "internal mutation operations"
+ )
+
+ def append_without_event(self, item):
+ """Add or restore an entity to the collection, firing no events."""
+
+ if self.empty:
+ self._refuse_empty()
+ self._data()._sa_appender(item, _sa_initiator=False)
+
+ def append_multiple_without_event(self, items):
+ """Add or restore an entity to the collection, firing no events."""
+ if self.empty:
+ self._refuse_empty()
+ appender = self._data()._sa_appender
+ for item in items:
+ appender(item, _sa_initiator=False)
+
+ def bulk_remover(self):
+ return self._data()._sa_remover
+
+ def remove_with_event(self, item, initiator=None):
+ """Remove an entity from the collection, firing mutation events."""
+ self._data()._sa_remover(item, _sa_initiator=initiator)
+
+ def remove_without_event(self, item):
+ """Remove an entity from the collection, firing no events."""
+ if self.empty:
+ self._refuse_empty()
+ self._data()._sa_remover(item, _sa_initiator=False)
+
+ def clear_with_event(self, initiator=None):
+ """Empty the collection, firing a mutation event for each entity."""
+
+ if self.empty:
+ self._refuse_empty()
+ remover = self._data()._sa_remover
+ for item in list(self):
+ remover(item, _sa_initiator=initiator)
+
+ def clear_without_event(self):
+ """Empty the collection, firing no events."""
+
+ if self.empty:
+ self._refuse_empty()
+ remover = self._data()._sa_remover
+ for item in list(self):
+ remover(item, _sa_initiator=False)
+
+ def __iter__(self):
+ """Iterate over entities in the collection."""
+
+ return iter(self._data()._sa_iterator())
+
+ def __len__(self):
+ """Count entities in the collection."""
+ return len(list(self._data()._sa_iterator()))
+
+ def __bool__(self):
+ return True
+
+ __nonzero__ = __bool__
+
+ def fire_append_wo_mutation_event(self, item, initiator=None):
+ """Notify that a entity is entering the collection but is already
+ present.
+
+
+ Initiator is a token owned by the InstrumentedAttribute that
+ initiated the membership mutation, and should be left as None
+ unless you are passing along an initiator value from a chained
+ operation.
+
+ .. versionadded:: 1.4.15
+
+ """
+ if initiator is not False:
+ if self.invalidated:
+ self._warn_invalidated()
+
+ if self.empty:
+ self._reset_empty()
+
+ return self.attr.fire_append_wo_mutation_event(
+ self.owner_state, self.owner_state.dict, item, initiator
+ )
+ else:
+ return item
+
+ def fire_append_event(self, item, initiator=None):
+ """Notify that a entity has entered the collection.
+
+ Initiator is a token owned by the InstrumentedAttribute that
+ initiated the membership mutation, and should be left as None
+ unless you are passing along an initiator value from a chained
+ operation.
+
+ """
+ if initiator is not False:
+ if self.invalidated:
+ self._warn_invalidated()
+
+ if self.empty:
+ self._reset_empty()
+
+ return self.attr.fire_append_event(
+ self.owner_state, self.owner_state.dict, item, initiator
+ )
+ else:
+ return item
+
+ def fire_remove_event(self, item, initiator=None):
+ """Notify that a entity has been removed from the collection.
+
+ Initiator is the InstrumentedAttribute that initiated the membership
+ mutation, and should be left as None unless you are passing along
+ an initiator value from a chained operation.
+
+ """
+ if initiator is not False:
+ if self.invalidated:
+ self._warn_invalidated()
+
+ if self.empty:
+ self._reset_empty()
+
+ self.attr.fire_remove_event(
+ self.owner_state, self.owner_state.dict, item, initiator
+ )
+
+ def fire_pre_remove_event(self, initiator=None):
+ """Notify that an entity is about to be removed from the collection.
+
+ Only called if the entity cannot be removed after calling
+ fire_remove_event().
+
+ """
+ if self.invalidated:
+ self._warn_invalidated()
+ self.attr.fire_pre_remove_event(
+ self.owner_state, self.owner_state.dict, initiator=initiator
+ )
+
+ def __getstate__(self):
+ return {
+ "key": self._key,
+ "owner_state": self.owner_state,
+ "owner_cls": self.owner_state.class_,
+ "data": self.data,
+ "invalidated": self.invalidated,
+ "empty": self.empty,
+ }
+
+ def __setstate__(self, d):
+ self._key = d["key"]
+ self.owner_state = d["owner_state"]
+ self._data = weakref.ref(d["data"])
+ self._converter = d["data"]._sa_converter
+ d["data"]._sa_adapter = self
+ self.invalidated = d["invalidated"]
+ self.attr = getattr(d["owner_cls"], self._key).impl
+ self.empty = d.get("empty", False)
+
+
+def bulk_replace(values, existing_adapter, new_adapter, initiator=None):
+ """Load a new collection, firing events based on prior like membership.
+
+ Appends instances in ``values`` onto the ``new_adapter``. Events will be
+ fired for any instance not present in the ``existing_adapter``. Any
+ instances in ``existing_adapter`` not present in ``values`` will have
+ remove events fired upon them.
+
+ :param values: An iterable of collection member instances
+
+ :param existing_adapter: A :class:`.CollectionAdapter` of
+ instances to be replaced
+
+ :param new_adapter: An empty :class:`.CollectionAdapter`
+ to load with ``values``
+
+ """
+
+ assert isinstance(values, list)
+
+ idset = util.IdentitySet
+ existing_idset = idset(existing_adapter or ())
+ constants = existing_idset.intersection(values or ())
+ additions = idset(values or ()).difference(constants)
+ removals = existing_idset.difference(constants)
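+ # e.g.: values=[a, b, c] against an existing collection [b, c, d]
+ # yields constants={b, c}, additions={a}, removals={d}; "a" fires an
+ # append event, "b" and "c" are appended without events, and "d"
+ # receives a remove event.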
+
+ appender = new_adapter.bulk_appender()
+
+ for member in values or ():
+ if member in additions:
+ appender(member, _sa_initiator=initiator)
+ elif member in constants:
+ appender(member, _sa_initiator=False)
+
+ if existing_adapter:
+ for member in removals:
+ existing_adapter.fire_remove_event(member, initiator=initiator)
+
+
+def prepare_instrumentation(factory):
+ """Prepare a callable for future use as a collection class factory.
+
+ Given a collection class factory (either a type or no-arg callable),
+ return another factory that will produce compatible instances when
+ called.
+
+ This function is responsible for converting collection_class=list
+ into the run-time behavior of collection_class=InstrumentedList.
+
+ """
+ # Convert a builtin to 'Instrumented*'
+ if factory in __canned_instrumentation:
+ factory = __canned_instrumentation[factory]
+
+ # Create a specimen
+ cls = type(factory())
+
+ # Did factory callable return a builtin?
+ if cls in __canned_instrumentation:
+ # Wrap it so that it returns our 'Instrumented*'
+ factory = __converting_factory(cls, factory)
+ cls = factory()
+
+ # Instrument the class if needed.
+ if __instrumentation_mutex.acquire():
+ try:
+ if getattr(cls, "_sa_instrumented", None) != id(cls):
+ _instrument_class(cls)
+ finally:
+ __instrumentation_mutex.release()
+
+ return factory
+
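+# For illustration: prepare_instrumentation(list) returns
+# InstrumentedList directly, while prepare_instrumentation(lambda: set())
+# returns a wrapper factory whose calls produce InstrumentedSet
+# instances built from the plain set returned by the original callable.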
+
+def __converting_factory(specimen_cls, original_factory):
+ """Return a wrapper that converts a "canned" collection like
+ set, dict, list into the Instrumented* version.
+
+ """
+
+ instrumented_cls = __canned_instrumentation[specimen_cls]
+
+ def wrapper():
+ collection = original_factory()
+ return instrumented_cls(collection)
+
+ # often flawed but better than nothing
+ wrapper.__name__ = "%sWrapper" % original_factory.__name__
+ wrapper.__doc__ = original_factory.__doc__
+
+ return wrapper
+
+
+def _instrument_class(cls):
+ """Modify methods in a class and install instrumentation."""
+
+ # In the normal call flow, a request for any of the 3 basic collection
+ # types is transformed into one of our trivial subclasses
+ # (e.g. InstrumentedList). Catch anything else that sneaks in here...
+ if cls.__module__ == "__builtin__":
+ raise sa_exc.ArgumentError(
+ "Can not instrument a built-in type. Use a "
+ "subclass, even a trivial one."
+ )
+
+ roles, methods = _locate_roles_and_methods(cls)
+
+ _setup_canned_roles(cls, roles, methods)
+
+ _assert_required_roles(cls, roles, methods)
+
+ _set_collection_attributes(cls, roles, methods)
+
+
+def _locate_roles_and_methods(cls):
+ """search for _sa_instrument_role-decorated methods in
+ method resolution order, assign to roles.
+
+ """
+
+ roles = {}
+ methods = {}
+
+ for supercls in cls.__mro__:
+ for name, method in vars(supercls).items():
+ if not callable(method):
+ continue
+
+ # note role declarations
+ if hasattr(method, "_sa_instrument_role"):
+ role = method._sa_instrument_role
+ assert role in (
+ "appender",
+ "remover",
+ "iterator",
+ "converter",
+ )
+ roles.setdefault(role, name)
+
+ # transfer instrumentation requests from decorated function
+ # to the combined queue
+ before, after = None, None
+ if hasattr(method, "_sa_instrument_before"):
+ op, argument = method._sa_instrument_before
+ assert op in ("fire_append_event", "fire_remove_event")
+ before = op, argument
+ if hasattr(method, "_sa_instrument_after"):
+ op = method._sa_instrument_after
+ assert op in ("fire_append_event", "fire_remove_event")
+ after = op
+ if before:
+ methods[name] = before + (after,)
+ elif after:
+ methods[name] = None, None, after
+ return roles, methods
+
+
+def _setup_canned_roles(cls, roles, methods):
+ """see if this class has "canned" roles based on a known
+ collection type (dict, set, list). Apply those roles
+ as needed to the "roles" dictionary, and also
+ prepare "decorator" methods
+
+ """
+ collection_type = util.duck_type_collection(cls)
+ if collection_type in __interfaces:
+ canned_roles, decorators = __interfaces[collection_type]
+ for role, name in canned_roles.items():
+ roles.setdefault(role, name)
+
+ # apply ABC auto-decoration to methods that need it
+ for method, decorator in decorators.items():
+ fn = getattr(cls, method, None)
+ if (
+ fn
+ and method not in methods
+ and not hasattr(fn, "_sa_instrumented")
+ ):
+ setattr(cls, method, decorator(fn))
+
+
+def _assert_required_roles(cls, roles, methods):
+ """ensure all roles are present, and apply implicit instrumentation if
+ needed
+
+ """
+ if "appender" not in roles or not hasattr(cls, roles["appender"]):
+ raise sa_exc.ArgumentError(
+ "Type %s must elect an appender method to be "
+ "a collection class" % cls.__name__
+ )
+ elif roles["appender"] not in methods and not hasattr(
+ getattr(cls, roles["appender"]), "_sa_instrumented"
+ ):
+ methods[roles["appender"]] = ("fire_append_event", 1, None)
+
+ if "remover" not in roles or not hasattr(cls, roles["remover"]):
+ raise sa_exc.ArgumentError(
+ "Type %s must elect a remover method to be "
+ "a collection class" % cls.__name__
+ )
+ elif roles["remover"] not in methods and not hasattr(
+ getattr(cls, roles["remover"]), "_sa_instrumented"
+ ):
+ methods[roles["remover"]] = ("fire_remove_event", 1, None)
+
+ if "iterator" not in roles or not hasattr(cls, roles["iterator"]):
+ raise sa_exc.ArgumentError(
+ "Type %s must elect an iterator method to be "
+ "a collection class" % cls.__name__
+ )
+
+
+def _set_collection_attributes(cls, roles, methods):
+ """apply ad-hoc instrumentation from decorators, class-level defaults
+ and implicit role declarations
+
+ """
+ for method_name, (before, argument, after) in methods.items():
+ setattr(
+ cls,
+ method_name,
+ _instrument_membership_mutator(
+ getattr(cls, method_name), before, argument, after
+ ),
+ )
+ # intern the role map
+ for role, method_name in roles.items():
+ setattr(cls, "_sa_%s" % role, getattr(cls, method_name))
+
+ cls._sa_adapter = None
+
+ if not hasattr(cls, "_sa_converter"):
+ cls._sa_converter = None
+ cls._sa_instrumented = id(cls)
+
+
+def _instrument_membership_mutator(method, before, argument, after):
+ """Route method args and/or return value through the collection
+ adapter."""
+ # This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))'
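+ # e.g. for a method decorated with @collection.adds(1), ``before`` is
+ # "fire_append_event" and ``argument`` is 1; the wrapper locates the
+ # value positionally or by keyword and routes it through the collection
+ # adapter before invoking the wrapped method.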
+ if before:
+ fn_args = list(
+ util.flatten_iterator(inspect_getfullargspec(method)[0])
+ )
+ if isinstance(argument, int):
+ pos_arg = argument
+ named_arg = len(fn_args) > argument and fn_args[argument] or None
+ else:
+ if argument in fn_args:
+ pos_arg = fn_args.index(argument)
+ else:
+ pos_arg = None
+ named_arg = argument
+ del fn_args
+
+ def wrapper(*args, **kw):
+ if before:
+ if pos_arg is None:
+ if named_arg not in kw:
+ raise sa_exc.ArgumentError(
+ "Missing argument %s" % argument
+ )
+ value = kw[named_arg]
+ else:
+ if len(args) > pos_arg:
+ value = args[pos_arg]
+ elif named_arg in kw:
+ value = kw[named_arg]
+ else:
+ raise sa_exc.ArgumentError(
+ "Missing argument %s" % argument
+ )
+
+ initiator = kw.pop("_sa_initiator", None)
+ if initiator is False:
+ executor = None
+ else:
+ executor = args[0]._sa_adapter
+
+ if before and executor:
+ getattr(executor, before)(value, initiator)
+
+ if not after or not executor:
+ return method(*args, **kw)
+ else:
+ res = method(*args, **kw)
+ if res is not None:
+ getattr(executor, after)(res, initiator)
+ return res
+
+ wrapper._sa_instrumented = True
+ if hasattr(method, "_sa_instrument_role"):
+ wrapper._sa_instrument_role = method._sa_instrument_role
+ wrapper.__name__ = method.__name__
+ wrapper.__doc__ = method.__doc__
+ return wrapper
+
+
+def __set_wo_mutation(collection, item, _sa_initiator=None):
+ """Run set wo mutation events.
+
+ The collection is not mutated.
+
+ """
+ if _sa_initiator is not False:
+ executor = collection._sa_adapter
+ if executor:
+ executor.fire_append_wo_mutation_event(item, _sa_initiator)
+
+
+def __set(collection, item, _sa_initiator=None):
+ """Run set events.
+
+ This event always occurs before the collection is actually mutated.
+
+ """
+
+ if _sa_initiator is not False:
+ executor = collection._sa_adapter
+ if executor:
+ item = executor.fire_append_event(item, _sa_initiator)
+ return item
+
+
+def __del(collection, item, _sa_initiator=None):
+ """Run del events.
+
+ This event occurs before the collection is actually mutated, *except*
+ in the case of a pop operation, in which case it occurs afterwards.
+ For pop operations, the __before_pop hook is called before the
+ operation occurs.
+
+ """
+ if _sa_initiator is not False:
+ executor = collection._sa_adapter
+ if executor:
+ executor.fire_remove_event(item, _sa_initiator)
+
+
+def __before_pop(collection, _sa_initiator=None):
+ """An event which occurs on a before a pop() operation occurs."""
+ executor = collection._sa_adapter
+ if executor:
+ executor.fire_pre_remove_event(_sa_initiator)
+
+
+def _list_decorators():
+ """Tailored instrumentation wrappers for any list-like class."""
+
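+ # Each inner function below takes the original list method ``fn`` and
+ # returns an event-firing replacement; _setup_canned_roles() installs
+ # these on list-like classes whose methods are not already instrumented.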
+ def _tidy(fn):
+ fn._sa_instrumented = True
+ fn.__doc__ = getattr(list, fn.__name__).__doc__
+
+ def append(fn):
+ def append(self, item, _sa_initiator=None):
+ item = __set(self, item, _sa_initiator)
+ fn(self, item)
+
+ _tidy(append)
+ return append
+
+ def remove(fn):
+ def remove(self, value, _sa_initiator=None):
+ __del(self, value, _sa_initiator)
+ # testlib.pragma exempt:__eq__
+ fn(self, value)
+
+ _tidy(remove)
+ return remove
+
+ def insert(fn):
+ def insert(self, index, value):
+ value = __set(self, value)
+ fn(self, index, value)
+
+ _tidy(insert)
+ return insert
+
+ def __setitem__(fn):
+ def __setitem__(self, index, value):
+ if not isinstance(index, slice):
+ existing = self[index]
+ if existing is not None:
+ __del(self, existing)
+ value = __set(self, value)
+ fn(self, index, value)
+ else:
+ # slice assignment requires __delitem__, insert, __len__
+ step = index.step or 1
+ start = index.start or 0
+ if start < 0:
+ start += len(self)
+ if index.stop is not None:
+ stop = index.stop
+ else:
+ stop = len(self)
+ if stop < 0:
+ stop += len(self)
+
+ if step == 1:
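+ # a no-op when assigning the collection to a slice of itself;
+ # the delete-and-reinsert sequence below would otherwise consume
+ # its own source while mutating it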
+ if value is self:
+ return
+ for i in range(start, stop, step):
+ if len(self) > start:
+ del self[start]
+
+ for i, item in enumerate(value):
+ self.insert(i + start, item)
+ else:
+ rng = list(range(start, stop, step))
+ if len(value) != len(rng):
+ raise ValueError(
+ "attempt to assign sequence of size %s to "
+ "extended slice of size %s"
+ % (len(value), len(rng))
+ )
+ for i, item in zip(rng, value):
+ self.__setitem__(i, item)
+
+ _tidy(__setitem__)
+ return __setitem__
+
+ def __delitem__(fn):
+ def __delitem__(self, index):
+ if not isinstance(index, slice):
+ item = self[index]
+ __del(self, item)
+ fn(self, index)
+ else:
+ # slice deletion requires __getslice__ and a slice-grokking
+ # __getitem__ for stepped deletion
+ # note: not breaking this into atomic dels
+ for item in self[index]:
+ __del(self, item)
+ fn(self, index)
+
+ _tidy(__delitem__)
+ return __delitem__
+
+ if util.py2k:
+
+ def __setslice__(fn):
+ def __setslice__(self, start, end, values):
+ for value in self[start:end]:
+ __del(self, value)
+ values = [__set(self, value) for value in values]
+ fn(self, start, end, values)
+
+ _tidy(__setslice__)
+ return __setslice__
+
+ def __delslice__(fn):
+ def __delslice__(self, start, end):
+ for value in self[start:end]:
+ __del(self, value)
+ fn(self, start, end)
+
+ _tidy(__delslice__)
+ return __delslice__
+
+ def extend(fn):
+ def extend(self, iterable):
+ for value in list(iterable):
+ self.append(value)
+
+ _tidy(extend)
+ return extend
+
+ def __iadd__(fn):
+ def __iadd__(self, iterable):
+ # list.__iadd__ takes any iterable and seems to let TypeError
+ # raise as-is instead of returning NotImplemented
+ for value in list(iterable):
+ self.append(value)
+ return self
+
+ _tidy(__iadd__)
+ return __iadd__
+
+ def pop(fn):
+ def pop(self, index=-1):
+ __before_pop(self)
+ item = fn(self, index)
+ __del(self, item)
+ return item
+
+ _tidy(pop)
+ return pop
+
+ if not util.py2k:
+
+ def clear(fn):
+ def clear(self):
+ for item in self:
+ __del(self, item)
+ fn(self)
+
+ _tidy(clear)
+ return clear
+
+ # __imul__ : not wrapping this. all members of the collection are already
+ # present, so no need to fire appends... wrapping it with an explicit
+ # decorator is still possible, so events on *= can be had if they're
+ # desired. hard to imagine a use case for __imul__, though.
+
+ l = locals().copy()
+ l.pop("_tidy")
+ return l
+
+
+def _dict_decorators():
+ """Tailored instrumentation wrappers for any dict-like mapping class."""
+
+ def _tidy(fn):
+ fn._sa_instrumented = True
+ fn.__doc__ = getattr(dict, fn.__name__).__doc__
+
+ Unspecified = util.symbol("Unspecified")
+
+ def __setitem__(fn):
+ def __setitem__(self, key, value, _sa_initiator=None):
+ if key in self:
+ __del(self, self[key], _sa_initiator)
+ value = __set(self, value, _sa_initiator)
+ fn(self, key, value)
+
+ _tidy(__setitem__)
+ return __setitem__
+
+ def __delitem__(fn):
+ def __delitem__(self, key, _sa_initiator=None):
+ if key in self:
+ __del(self, self[key], _sa_initiator)
+ fn(self, key)
+
+ _tidy(__delitem__)
+ return __delitem__
+
+ def clear(fn):
+ def clear(self):
+ for key in self:
+ __del(self, self[key])
+ fn(self)
+
+ _tidy(clear)
+ return clear
+
+ def pop(fn):
+ def pop(self, key, default=Unspecified):
+ __before_pop(self)
+ _to_del = key in self
+ if default is Unspecified:
+ item = fn(self, key)
+ else:
+ item = fn(self, key, default)
+ if _to_del:
+ __del(self, item)
+ return item
+
+ _tidy(pop)
+ return pop
+
+ def popitem(fn):
+ def popitem(self):
+ __before_pop(self)
+ item = fn(self)
+ __del(self, item[1])
+ return item
+
+ _tidy(popitem)
+ return popitem
+
+ def setdefault(fn):
+ def setdefault(self, key, default=None):
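+ # when the key is absent, route through the instrumented
+ # __setitem__ so an append event fires for ``default``; when the
+ # key is present and its value is ``default`` itself, emit only
+ # a "wo mutation" event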
+ if key not in self:
+ self.__setitem__(key, default)
+ return default
+ else:
+ value = self.__getitem__(key)
+ if value is default:
+ __set_wo_mutation(self, value, None)
+
+ return value
+
+ _tidy(setdefault)
+ return setdefault
+
+ def update(fn):
+ def update(self, __other=Unspecified, **kw):
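+ # mirror dict.update() semantics, assigning (and thereby firing
+ # events) only when a key is new or maps to a different object;
+ # members that are already present unchanged get a "wo mutation"
+ # event only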
+ if __other is not Unspecified:
+ if hasattr(__other, "keys"):
+ for key in list(__other):
+ if key not in self or self[key] is not __other[key]:
+ self[key] = __other[key]
+ else:
+ __set_wo_mutation(self, __other[key], None)
+ else:
+ for key, value in __other:
+ if key not in self or self[key] is not value:
+ self[key] = value
+ else:
+ __set_wo_mutation(self, value, None)
+ for key in kw:
+ if key not in self or self[key] is not kw[key]:
+ self[key] = kw[key]
+ else:
+ __set_wo_mutation(self, kw[key], None)
+
+ _tidy(update)
+ return update
+
+ l = locals().copy()
+ l.pop("_tidy")
+ l.pop("Unspecified")
+ return l
+
+
+_set_binop_bases = (set, frozenset)
+
+
+def _set_binops_check_strict(self, obj):
+ """Allow only set, frozenset and self.__class__-derived
+ objects in binops."""
+ return isinstance(obj, _set_binop_bases + (self.__class__,))
+
+
+def _set_binops_check_loose(self, obj):
+ """Allow anything set-like to participate in set binops."""
+ return (
+ isinstance(obj, _set_binop_bases + (self.__class__,))
+ or util.duck_type_collection(obj) == set
+ )
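+# e.g. the in-place operators in _set_decorators() use the strict check,
+# so InstrumentedSet.__ior__ with a plain list returns NotImplemented and
+# Python ultimately raises TypeError, mirroring built-in set behavior;
+# the loose variant additionally admits objects that duck-type as sets.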
+
+
+def _set_decorators():
+ """Tailored instrumentation wrappers for any set-like class."""
+
+ def _tidy(fn):
+ fn._sa_instrumented = True
+ fn.__doc__ = getattr(set, fn.__name__).__doc__
+
+ Unspecified = util.symbol("Unspecified")
+
+ def add(fn):
+ def add(self, value, _sa_initiator=None):
+ if value not in self:
+ value = __set(self, value, _sa_initiator)
+ else:
+ __set_wo_mutation(self, value, _sa_initiator)
+ # testlib.pragma exempt:__hash__
+ fn(self, value)
+
+ _tidy(add)
+ return add
+
+ def discard(fn):
+ def discard(self, value, _sa_initiator=None):
+ # testlib.pragma exempt:__hash__
+ if value in self:
+ __del(self, value, _sa_initiator)
+ # testlib.pragma exempt:__hash__
+ fn(self, value)
+
+ _tidy(discard)
+ return discard
+
+ def remove(fn):
+ def remove(self, value, _sa_initiator=None):
+ # testlib.pragma exempt:__hash__
+ if value in self:
+ __del(self, value, _sa_initiator)
+ # testlib.pragma exempt:__hash__
+ fn(self, value)
+
+ _tidy(remove)
+ return remove
+
+ def pop(fn):
+ def pop(self):
+ __before_pop(self)
+ item = fn(self)
+ # for set in particular, we have no way to access the item
+ # that will be popped before pop is called.
+ __del(self, item)
+ return item
+
+ _tidy(pop)
+ return pop
+
+ def clear(fn):
+ def clear(self):
+ for item in list(self):
+ self.remove(item)
+
+ _tidy(clear)
+ return clear
+
+ def update(fn):
+ def update(self, value):
+ for item in value:
+ self.add(item)
+
+ _tidy(update)
+ return update
+
+ def __ior__(fn):
+ def __ior__(self, value):
+ if not _set_binops_check_strict(self, value):
+ return NotImplemented
+ for item in value:
+ self.add(item)
+ return self
+
+ _tidy(__ior__)
+ return __ior__
+
+ def difference_update(fn):
+ def difference_update(self, value):
+ for item in value:
+ self.discard(item)
+
+ _tidy(difference_update)
+ return difference_update
+
+ def __isub__(fn):
+ def __isub__(self, value):
+ if not _set_binops_check_strict(self, value):
+ return NotImplemented
+ for item in value:
+ self.discard(item)
+ return self
+
+ _tidy(__isub__)
+ return __isub__
+
+ def intersection_update(fn):
+ def intersection_update(self, other):
+ want, have = self.intersection(other), set(self)
+ remove, add = have - want, want - have
+
+ for item in remove:
+ self.remove(item)
+ for item in add:
+ self.add(item)
+
+ _tidy(intersection_update)
+ return intersection_update
+
+ def __iand__(fn):
+ def __iand__(self, other):
+ if not _set_binops_check_strict(self, other):
+ return NotImplemented
+ want, have = self.intersection(other), set(self)
+ remove, add = have - want, want - have
+
+ for item in remove:
+ self.remove(item)
+ for item in add:
+ self.add(item)
+ return self
+
+ _tidy(__iand__)
+ return __iand__
+
+ def symmetric_difference_update(fn):
+ def symmetric_difference_update(self, other):
+ want, have = self.symmetric_difference(other), set(self)
+ remove, add = have - want, want - have
+
+ for item in remove:
+ self.remove(item)
+ for item in add:
+ self.add(item)
+
+ _tidy(symmetric_difference_update)
+ return symmetric_difference_update
+
+ def __ixor__(fn):
+ def __ixor__(self, other):
+ if not _set_binops_check_strict(self, other):
+ return NotImplemented
+ want, have = self.symmetric_difference(other), set(self)
+ remove, add = have - want, want - have
+
+ for item in remove:
+ self.remove(item)
+ for item in add:
+ self.add(item)
+ return self
+
+ _tidy(__ixor__)
+ return __ixor__
+
+ l = locals().copy()
+ l.pop("_tidy")
+ l.pop("Unspecified")
+ return l
+
+
+class InstrumentedList(list):
+ """An instrumented version of the built-in list."""
+
+
+class InstrumentedSet(set):
+ """An instrumented version of the built-in set."""
+
+
+class InstrumentedDict(dict):
+ """An instrumented version of the built-in dict."""
+
+
+__canned_instrumentation = {
+ list: InstrumentedList,
+ set: InstrumentedSet,
+ dict: InstrumentedDict,
+}
+
+__interfaces = {
+ list: (
+ {"appender": "append", "remover": "remove", "iterator": "__iter__"},
+ _list_decorators(),
+ ),
+ set: (
+ {"appender": "add", "remover": "remove", "iterator": "__iter__"},
+ _set_decorators(),
+ ),
+ # decorators are required for dicts and object collections.
+ dict: ({"iterator": "values"}, _dict_decorators())
+ if util.py3k
+ else ({"iterator": "itervalues"}, _dict_decorators()),
+}
+
+
+class MappedCollection(dict):
+ """A basic dictionary-based collection class.
+
+ Extends dict with the minimal bag semantics that collection
+ classes require. ``set`` and ``remove`` are implemented in terms
+ of a keying function: any callable that takes an object and
+ returns an object for use as a dictionary key.
+
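+ A usage sketch (``name`` is a hypothetical attribute of the member
+ objects)::
+
+ items = relationship(
+ "Item",
+ collection_class=lambda: MappedCollection(lambda item: item.name),
+ )
+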
+ """
+
+ def __init__(self, keyfunc):
+ """Create a new collection with keying provided by keyfunc.
+
+ keyfunc may be any callable that takes an object and returns an object
+ for use as a dictionary key.
+
+ The keyfunc will be called every time the ORM needs to add a member by
+ value only (such as when loading instances from the database) or
+ remove a member.  The usual cautions about dictionary keying apply:
+ ``keyfunc(object)`` should return the same output for the life of the
+ collection. Keying based on mutable properties can result in
+ unreachable instances "lost" in the collection.
+
+ """
+ self.keyfunc = keyfunc
+
+ @collection.appender
+ @collection.internally_instrumented
+ def set(self, value, _sa_initiator=None):
+ """Add an item by value, consulting the keyfunc for the key."""
+
+ key = self.keyfunc(value)
+ self.__setitem__(key, value, _sa_initiator)
+
+ @collection.remover
+ @collection.internally_instrumented
+ def remove(self, value, _sa_initiator=None):
+ """Remove an item by value, consulting the keyfunc for the key."""
+
+ key = self.keyfunc(value)
+ # Let self[key] raise if key is not in this collection
+ # testlib.pragma exempt:__ne__
+ if self[key] != value:
+ raise sa_exc.InvalidRequestError(
+ "Can not remove '%s': collection holds '%s' for key '%s'. "
+ "Possible cause: is the MappedCollection key function "
+ "based on mutable properties or properties that only obtain "
+ "values after flush?" % (value, self[key], key)
+ )
+ self.__delitem__(key, _sa_initiator)
+
+
+# ensure instrumentation is associated with
+# these built-in classes; if a user-defined class
+# subclasses these and uses @internally_instrumented,
+# the superclass is otherwise not instrumented.
+# see [ticket:2406].
+_instrument_class(MappedCollection)
+_instrument_class(InstrumentedList)
+_instrument_class(InstrumentedSet)
diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py
new file mode 100644
index 0000000..9d4f652
--- /dev/null
+++ b/lib/sqlalchemy/orm/context.py
@@ -0,0 +1,3136 @@
+# orm/context.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+import itertools
+
+from . import attributes
+from . import interfaces
+from . import loading
+from .base import _is_aliased_class
+from .interfaces import ORMColumnsClauseRole
+from .path_registry import PathRegistry
+from .util import _entity_corresponds_to
+from .util import _ORMJoin
+from .util import aliased
+from .util import Bundle
+from .util import ORMAdapter
+from .. import exc as sa_exc
+from .. import future
+from .. import inspect
+from .. import sql
+from .. import util
+from ..sql import ClauseElement
+from ..sql import coercions
+from ..sql import expression
+from ..sql import roles
+from ..sql import util as sql_util
+from ..sql import visitors
+from ..sql.base import _entity_namespace_key
+from ..sql.base import _select_iterables
+from ..sql.base import CacheableOptions
+from ..sql.base import CompileState
+from ..sql.base import Options
+from ..sql.selectable import LABEL_STYLE_DISAMBIGUATE_ONLY
+from ..sql.selectable import LABEL_STYLE_NONE
+from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
+from ..sql.selectable import SelectState
+from ..sql.visitors import ExtendedInternalTraversal
+from ..sql.visitors import InternalTraversal
+
+_path_registry = PathRegistry.root
+
+_EMPTY_DICT = util.immutabledict()
+
+
+LABEL_STYLE_LEGACY_ORM = util.symbol("LABEL_STYLE_LEGACY_ORM")
+
+
+class QueryContext(object):
+ __slots__ = (
+ "compile_state",
+ "query",
+ "params",
+ "load_options",
+ "bind_arguments",
+ "execution_options",
+ "session",
+ "autoflush",
+ "populate_existing",
+ "invoke_all_eagers",
+ "version_check",
+ "refresh_state",
+ "create_eager_joins",
+ "propagated_loader_options",
+ "attributes",
+ "runid",
+ "partials",
+ "post_load_paths",
+ "identity_token",
+ "yield_per",
+ "loaders_require_buffering",
+ "loaders_require_uniquing",
+ )
+
+ class default_load_options(Options):
+ _only_return_tuples = False
+ _populate_existing = False
+ _version_check = False
+ _invoke_all_eagers = True
+ _autoflush = True
+ _refresh_identity_token = None
+ _yield_per = None
+ _refresh_state = None
+ _lazy_loaded_from = None
+ _legacy_uniquing = False
+
+ def __init__(
+ self,
+ compile_state,
+ statement,
+ params,
+ session,
+ load_options,
+ execution_options=None,
+ bind_arguments=None,
+ ):
+ self.load_options = load_options
+ self.execution_options = execution_options or _EMPTY_DICT
+ self.bind_arguments = bind_arguments or _EMPTY_DICT
+ self.compile_state = compile_state
+ self.query = statement
+ self.session = session
+ self.loaders_require_buffering = False
+ self.loaders_require_uniquing = False
+ self.params = params
+
+ self.propagated_loader_options = {
+ # issue 7447.
+ # propagated loader options will be present on loaded InstanceState
+ # objects under state.load_options and are typically used by
+ # LazyLoader to apply options to the SELECT statement it emits.
+ # For compile state options (i.e. loader strategy options), these
+ # need to line up with the ".load_path" attribute which in
+ # loader.py is pulled from context.compile_state.current_path.
+ # so, this means these options have to be the ones from the
+ # *cached* statement that's travelling with compile_state, not the
+ # *current* statement which won't match up for an ad-hoc
+ # AliasedClass
+ cached_o
+ for cached_o in compile_state.select_statement._with_options
+ if cached_o.propagate_to_loaders and cached_o._is_compile_state
+ } | {
+ # for user defined loader options that are not "compile state",
+ # those just need to be present as they are
+ uncached_o
+ for uncached_o in statement._with_options
+ if uncached_o.propagate_to_loaders
+ and not uncached_o._is_compile_state
+ }
+
+ self.attributes = dict(compile_state.attributes)
+
+ self.autoflush = load_options._autoflush
+ self.populate_existing = load_options._populate_existing
+ self.invoke_all_eagers = load_options._invoke_all_eagers
+ self.version_check = load_options._version_check
+ self.refresh_state = load_options._refresh_state
+ self.yield_per = load_options._yield_per
+ self.identity_token = load_options._refresh_identity_token
+
+ if self.yield_per and compile_state._no_yield_pers:
+ raise sa_exc.InvalidRequestError(
+ "The yield_per Query option is currently not "
+ "compatible with %s eager loading. Please "
+ "specify lazyload('*') or query.enable_eagerloads(False) in "
+ "order to "
+ "proceed with query.yield_per()."
+ % ", ".join(compile_state._no_yield_pers)
+ )
+
+
+_orm_load_exec_options = util.immutabledict(
+ {"_result_disable_adapt_to_context": True, "future_result": True}
+)
+
+
+class ORMCompileState(CompileState):
+ # note this is a dictionary, but the
+ # default_compile_options._with_polymorphic_adapt_map is a tuple
+ _with_polymorphic_adapt_map = _EMPTY_DICT
+
+ class default_compile_options(CacheableOptions):
+ _cache_key_traversal = [
+ ("_use_legacy_query_style", InternalTraversal.dp_boolean),
+ ("_for_statement", InternalTraversal.dp_boolean),
+ ("_bake_ok", InternalTraversal.dp_boolean),
+ (
+ "_with_polymorphic_adapt_map",
+ ExtendedInternalTraversal.dp_has_cache_key_tuples,
+ ),
+ ("_current_path", InternalTraversal.dp_has_cache_key),
+ ("_enable_single_crit", InternalTraversal.dp_boolean),
+ ("_enable_eagerloads", InternalTraversal.dp_boolean),
+ ("_orm_only_from_obj_alias", InternalTraversal.dp_boolean),
+ ("_only_load_props", InternalTraversal.dp_plain_obj),
+ ("_set_base_alias", InternalTraversal.dp_boolean),
+ ("_for_refresh_state", InternalTraversal.dp_boolean),
+ ("_render_for_subquery", InternalTraversal.dp_boolean),
+ ("_is_star", InternalTraversal.dp_boolean),
+ ]
+
+ # set to True by default from Query._statement_20(), to indicate
+ # the rendered query should look like a legacy ORM query. right
+ # now this basically indicates we should use tablename_columnname
+ # style labels. Generally indicates the statement originated
+ # from a Query object.
+ _use_legacy_query_style = False
+
+ # set *only* when we are coming from the Query.statement
+ # accessor, or a Query-level equivalent such as
+ # query.subquery(). this supersedes "toplevel".
+ _for_statement = False
+
+ _bake_ok = True
+ _with_polymorphic_adapt_map = ()
+ _current_path = _path_registry
+ _enable_single_crit = True
+ _enable_eagerloads = True
+ _orm_only_from_obj_alias = True
+ _only_load_props = None
+ _set_base_alias = False
+ _for_refresh_state = False
+ _render_for_subquery = False
+ _is_star = False
+
+ current_path = _path_registry
+
+ def __init__(self, *arg, **kw):
+ raise NotImplementedError()
+
+ def _append_dedupe_col_collection(self, obj, col_collection):
+ dedupe = self.dedupe_columns
+ if obj not in dedupe:
+ dedupe.add(obj)
+ col_collection.append(obj)
+
+ @classmethod
+ def _column_naming_convention(cls, label_style, legacy):
+
+ if legacy:
+
+ def name(col, col_name=None):
+ if col_name:
+ return col_name
+ else:
+ return getattr(col, "key")
+
+ return name
+ else:
+ return SelectState._column_naming_convention(label_style)
+
+ @classmethod
+ def create_for_statement(cls, statement_container, compiler, **kw):
+ """Create a context for a statement given a :class:`.Compiler`.
+
+ This method is always invoked in the context of SQLCompiler.process().
+
+ For a Select object, this would be invoked from
+ SQLCompiler.visit_select(). For the special FromStatement object used
+ by Query to indicate "Query.from_statement()", this is called by
+ FromStatement._compiler_dispatch() that would be called by
+ SQLCompiler.process().
+
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def get_column_descriptions(cls, statement):
+ return _column_descriptions(statement)
+
+ @classmethod
+ def orm_pre_session_exec(
+ cls,
+ session,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ is_reentrant_invoke,
+ ):
+ if is_reentrant_invoke:
+ return statement, execution_options
+
+ (
+ load_options,
+ execution_options,
+ ) = QueryContext.default_load_options.from_execution_options(
+ "_sa_orm_load_options",
+ {"populate_existing", "autoflush", "yield_per"},
+ execution_options,
+ statement._execution_options,
+ )
+
+ # default execution options for ORM results:
+ # 1. _result_disable_adapt_to_context=True
+ # this will disable the ResultSetMetadata._adapt_to_context()
+ # step which we don't need, as we have result processors cached
+ # against the original SELECT statement before caching.
+ # 2. future_result=True. The ORM should **never** resolve columns
+ # in a result set based on names, only on Column objects that
+ # are correctly adapted to the context. With the legacy result
+ # it will still attempt name-based resolution and also emit a
+ # warning.
+ if not execution_options:
+ execution_options = _orm_load_exec_options
+ else:
+ execution_options = execution_options.union(_orm_load_exec_options)
+
+ if load_options._yield_per:
+ execution_options = execution_options.union(
+ {"yield_per": load_options._yield_per}
+ )
+
+ bind_arguments["clause"] = statement
+
+ # new in 1.4 - the coercions system is leveraged to allow the
+ # "subject" mapper of a statement be propagated to the top
+ # as the statement is built. the "subject" mapper is the object
+ # generally used as an identifier for multi-database schemes.
+
+ # we are here based on the fact that _propagate_attrs contains
+ # "compile_state_plugin": "orm". The "plugin_subject"
+ # needs to be present as well.
+
+ try:
+ plugin_subject = statement._propagate_attrs["plugin_subject"]
+ except KeyError:
+ assert False, "statement had 'orm' plugin but no plugin_subject"
+ else:
+ if plugin_subject:
+ bind_arguments["mapper"] = plugin_subject.mapper
+
+ if load_options._autoflush:
+ session._autoflush()
+
+ return statement, execution_options
+
+ @classmethod
+ def orm_setup_cursor_result(
+ cls,
+ session,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ result,
+ ):
+ execution_context = result.context
+ compile_state = execution_context.compiled.compile_state
+
+ # cover edge case where ORM entities used in legacy select
+ # were passed to session.execute:
+ # session.execute(legacy_select([User.id, User.name]))
+ # see test_query->test_legacy_tuple_old_select
+
+ load_options = execution_options.get(
+ "_sa_orm_load_options", QueryContext.default_load_options
+ )
+ if compile_state.compile_options._is_star:
+ return result
+
+ querycontext = QueryContext(
+ compile_state,
+ statement,
+ params,
+ session,
+ load_options,
+ execution_options,
+ bind_arguments,
+ )
+ return loading.instances(result, querycontext)
+
+ @property
+ def _lead_mapper_entities(self):
+ """return all _MapperEntity objects in the lead entities collection.
+
+ Does **not** include entities that have been replaced by
+ with_entities(), with_only_columns()
+
+ """
+ return [
+ ent for ent in self._entities if isinstance(ent, _MapperEntity)
+ ]
+
+ def _create_with_polymorphic_adapter(self, ext_info, selectable):
+ if (
+ not ext_info.is_aliased_class
+ and ext_info.mapper.persist_selectable
+ not in self._polymorphic_adapters
+ ):
+ for mp in ext_info.mapper.iterate_to_root():
+ self._mapper_loads_polymorphically_with(
+ mp,
+ sql_util.ColumnAdapter(selectable, mp._equivalent_columns),
+ )
+
+ def _mapper_loads_polymorphically_with(self, mapper, adapter):
+ for m2 in mapper._with_polymorphic_mappers or [mapper]:
+ self._polymorphic_adapters[m2] = adapter
+ for m in m2.iterate_to_root(): # TODO: redundant ?
+ self._polymorphic_adapters[m.local_table] = adapter
+
+ @classmethod
+ def _create_entities_collection(cls, query, legacy):
+ raise NotImplementedError(
+ "this method only works for ORMSelectCompileState"
+ )
+
+
+@sql.base.CompileState.plugin_for("orm", "orm_from_statement")
+class ORMFromStatementCompileState(ORMCompileState):
+ _aliased_generations = util.immutabledict()
+ _from_obj_alias = None
+ _has_mapper_entities = False
+
+ _has_orm_entities = False
+ multi_row_eager_loaders = False
+ compound_eager_adapter = None
+
+ extra_criteria_entities = _EMPTY_DICT
+ eager_joins = _EMPTY_DICT
+
+ @classmethod
+ def create_for_statement(cls, statement_container, compiler, **kw):
+
+ if compiler is not None:
+ toplevel = not compiler.stack
+ else:
+ toplevel = True
+
+ self = cls.__new__(cls)
+ self._primary_entity = None
+
+ self.use_legacy_query_style = (
+ statement_container._compile_options._use_legacy_query_style
+ )
+ self.statement_container = self.select_statement = statement_container
+ self.requested_statement = statement = statement_container.element
+
+ if statement.is_dml:
+ self.dml_table = statement.table
+
+ self._entities = []
+ self._polymorphic_adapters = {}
+ self._no_yield_pers = set()
+
+ self.compile_options = statement_container._compile_options
+
+ if (
+ self.use_legacy_query_style
+ and isinstance(statement, expression.SelectBase)
+ and not statement._is_textual
+ and not statement.is_dml
+ and statement._label_style is LABEL_STYLE_NONE
+ ):
+ self.statement = statement.set_label_style(
+ LABEL_STYLE_TABLENAME_PLUS_COL
+ )
+ else:
+ self.statement = statement
+
+ self._label_convention = self._column_naming_convention(
+ statement._label_style
+ if not statement._is_textual and not statement.is_dml
+ else LABEL_STYLE_NONE,
+ self.use_legacy_query_style,
+ )
+
+ _QueryEntity.to_compile_state(
+ self,
+ statement_container._raw_columns,
+ self._entities,
+ is_current_entities=True,
+ )
+
+ self.current_path = statement_container._compile_options._current_path
+
+ if toplevel and statement_container._with_options:
+ self.attributes = {"_unbound_load_dedupes": set()}
+ self.global_attributes = compiler._global_attributes
+
+ for opt in statement_container._with_options:
+ if opt._is_compile_state:
+ opt.process_compile_state(self)
+
+ else:
+ self.attributes = {}
+ self.global_attributes = compiler._global_attributes
+
+ if statement_container._with_context_options:
+ for fn, key in statement_container._with_context_options:
+ fn(self)
+
+ self.primary_columns = []
+ self.secondary_columns = []
+ self.dedupe_columns = set()
+ self.create_eager_joins = []
+ self._fallback_from_clauses = []
+
+ self.order_by = None
+
+ if isinstance(
+ self.statement, (expression.TextClause, expression.UpdateBase)
+ ):
+
+ self.extra_criteria_entities = {}
+
+ # setup for all entities. Currently, this is not useful
+ # for eager loaders, as the eager loaders that work are able
+ # to do their work entirely in row_processor.
+ for entity in self._entities:
+ entity.setup_compile_state(self)
+
+ # we did the setup just to get primary columns.
+ self.statement = _AdHocColumnsStatement(
+ self.statement, self.primary_columns
+ )
+ else:
+ # allow TextualSelect with implicit columns as well
+ # as select() with ad-hoc columns, see test_query::TextTest
+ self._from_obj_alias = sql.util.ColumnAdapter(
+ self.statement, adapt_on_names=True
+ )
+ # set up for eager loaders, however if we fix subqueryload
+ # it should not need to do this here. the model of eager loaders
+ # that can work entirely in row_processor might be interesting
+ # here though subqueryloader has a lot of upfront work to do
+ # see test/orm/test_query.py -> test_related_eagerload_against_text
+ # for where this part makes a difference. would rather have
+ # subqueryload figure out what it needs more intelligently.
+ # for entity in self._entities:
+ # entity.setup_compile_state(self)
+
+ return self
+
+ def _adapt_col_list(self, cols, current_adapter):
+ return cols
+
+ def _get_current_adapter(self):
+ return None
+
+
+class _AdHocColumnsStatement(ClauseElement):
+ """internal object created to somewhat act like a SELECT when we
+ are selecting columns from a DML RETURNING.
+
+ """
+
+ __visit_name__ = None
+
+ def __init__(self, text, columns):
+ self.element = text
+ self.column_args = [
+ coercions.expect(roles.ColumnsClauseRole, c) for c in columns
+ ]
+
+ def _generate_cache_key(self):
+ raise NotImplementedError()
+
+ def _gen_cache_key(self, anon_map, bindparams):
+ raise NotImplementedError()
+
+ def _compiler_dispatch(
+ self, compiler, compound_index=None, asfrom=False, **kw
+ ):
+ """provide a fixed _compiler_dispatch method."""
+
+ toplevel = not compiler.stack
+ entry = (
+ compiler._default_stack_entry if toplevel else compiler.stack[-1]
+ )
+
+ populate_result_map = (
+ toplevel
+ # these two might not be needed
+ or (
+ compound_index == 0
+ and entry.get("need_result_map_for_compound", False)
+ )
+ or entry.get("need_result_map_for_nested", False)
+ )
+
+ if populate_result_map:
+ compiler._ordered_columns = (
+ compiler._textual_ordered_columns
+ ) = False
+
+ # enable looser result column matching. this is shown to be
+ # needed by test_query.py::TextTest
+ compiler._loose_column_name_matching = True
+
+ for c in self.column_args:
+ compiler.process(
+ c,
+ within_columns_clause=True,
+ add_to_result_map=compiler._add_to_result_map,
+ )
+ return compiler.process(self.element, **kw)
+
+
+@sql.base.CompileState.plugin_for("orm", "select")
+class ORMSelectCompileState(ORMCompileState, SelectState):
+ _joinpath = _joinpoint = _EMPTY_DICT
+
+ _memoized_entities = _EMPTY_DICT
+
+ _from_obj_alias = None
+ _has_mapper_entities = False
+
+ _has_orm_entities = False
+ multi_row_eager_loaders = False
+ compound_eager_adapter = None
+
+ correlate = None
+ correlate_except = None
+ _where_criteria = ()
+ _having_criteria = ()
+
+ @classmethod
+ def create_for_statement(cls, statement, compiler, **kw):
+ """compiler hook, we arrive here from compiler.visit_select() only."""
+
+ self = cls.__new__(cls)
+
+ if compiler is not None:
+ toplevel = not compiler.stack
+ self.global_attributes = compiler._global_attributes
+ else:
+ toplevel = True
+ self.global_attributes = {}
+
+ select_statement = statement
+
+ # if we are a select() that was never a legacy Query, we won't
+ # have ORM level compile options.
+ statement._compile_options = cls.default_compile_options.safe_merge(
+ statement._compile_options
+ )
+
+ if select_statement._execution_options:
+ # execution options should not impact the compilation of a
+ # query, and at the moment subqueryloader is putting some things
+ # in here that we explicitly don't want stuck in a cache.
+ self.select_statement = select_statement._clone()
+ self.select_statement._execution_options = util.immutabledict()
+ else:
+ self.select_statement = select_statement
+
+ # indicates this select() came from Query.statement
+ self.for_statement = select_statement._compile_options._for_statement
+
+ # generally if we are from Query or directly from a select()
+ self.use_legacy_query_style = (
+ select_statement._compile_options._use_legacy_query_style
+ )
+
+ self._entities = []
+ self._primary_entity = None
+ self._aliased_generations = {}
+ self._polymorphic_adapters = {}
+ self._no_yield_pers = set()
+
+ # legacy: only for query.with_polymorphic()
+ if select_statement._compile_options._with_polymorphic_adapt_map:
+ self._with_polymorphic_adapt_map = dict(
+ select_statement._compile_options._with_polymorphic_adapt_map
+ )
+ self._setup_with_polymorphics()
+
+ self.compile_options = select_statement._compile_options
+
+ if not toplevel:
+ # for subqueries, turn off eagerloads and set
+ # "render_for_subquery".
+ self.compile_options += {
+ "_enable_eagerloads": False,
+ "_render_for_subquery": True,
+ }
+
+ # determine label style. we can make different decisions here.
+ # at the moment, trying to see if we can always use DISAMBIGUATE_ONLY
+ # rather than LABEL_STYLE_NONE, and if we can use disambiguate style
+ # for new style ORM selects too.
+ if (
+ self.use_legacy_query_style
+ and self.select_statement._label_style is LABEL_STYLE_LEGACY_ORM
+ ):
+ if not self.for_statement:
+ self.label_style = LABEL_STYLE_TABLENAME_PLUS_COL
+ else:
+ self.label_style = LABEL_STYLE_DISAMBIGUATE_ONLY
+ else:
+ self.label_style = self.select_statement._label_style
+
+ if select_statement._memoized_select_entities:
+ self._memoized_entities = {
+ memoized_entities: _QueryEntity.to_compile_state(
+ self,
+ memoized_entities._raw_columns,
+ [],
+ is_current_entities=False,
+ )
+ for memoized_entities in (
+ select_statement._memoized_select_entities
+ )
+ }
+
+ # label_convention is stateful and will yield deduping keys if it
+ # sees the same key twice. therefore it's important that it is not
+ # invoked for the above "memoized" entities that aren't actually
+ # in the columns clause
+ self._label_convention = self._column_naming_convention(
+ statement._label_style, self.use_legacy_query_style
+ )
+
+ _QueryEntity.to_compile_state(
+ self,
+ select_statement._raw_columns,
+ self._entities,
+ is_current_entities=True,
+ )
+
+ self.current_path = select_statement._compile_options._current_path
+
+ self.eager_order_by = ()
+
+ if toplevel and (
+ select_statement._with_options
+ or select_statement._memoized_select_entities
+ ):
+ self.attributes = {"_unbound_load_dedupes": set()}
+
+ for (
+ memoized_entities
+ ) in select_statement._memoized_select_entities:
+ for opt in memoized_entities._with_options:
+ if opt._is_compile_state:
+ opt.process_compile_state_replaced_entities(
+ self,
+ [
+ ent
+ for ent in self._memoized_entities[
+ memoized_entities
+ ]
+ if isinstance(ent, _MapperEntity)
+ ],
+ )
+
+ for opt in self.select_statement._with_options:
+ if opt._is_compile_state:
+ opt.process_compile_state(self)
+ else:
+ self.attributes = {}
+
+ if select_statement._with_context_options:
+ for fn, key in select_statement._with_context_options:
+ fn(self)
+
+ self.primary_columns = []
+ self.secondary_columns = []
+ self.dedupe_columns = set()
+ self.eager_joins = {}
+ self.extra_criteria_entities = {}
+ self.create_eager_joins = []
+ self._fallback_from_clauses = []
+
+ # normalize the FROM clauses early by themselves, as this makes
+ # it an easier job when we need to assemble a JOIN onto these,
+ # for select.join() as well as joinedload(). As of 1.4 there are now
+ # potentially more complex sets of FROM objects here as the use
+ # of lambda statements for lazyload, load_on_pk etc. uses more
+ # cloning of the select() construct. See #6495
+ self.from_clauses = self._normalize_froms(
+ info.selectable for info in select_statement._from_obj
+ )
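+        # as a sketch of the effect: duplicate FROM entries, e.g. the
+        # same table arriving via several column expressions, coalesce
+        # into a single element in the normalized list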
+
+ # this is a fairly arbitrary break into a second method,
+ # so it might be nicer to break up create_for_statement()
+ # and _setup_for_generate into three or four logical sections
+ self._setup_for_generate()
+
+ SelectState.__init__(self, self.statement, compiler, **kw)
+
+ return self
+
+ def _setup_for_generate(self):
+ query = self.select_statement
+
+ self.statement = None
+ self._join_entities = ()
+
+ if self.compile_options._set_base_alias:
+ self._set_select_from_alias()
+
+ for memoized_entities in query._memoized_select_entities:
+ if memoized_entities._setup_joins:
+ self._join(
+ memoized_entities._setup_joins,
+ self._memoized_entities[memoized_entities],
+ )
+ if memoized_entities._legacy_setup_joins:
+ self._legacy_join(
+ memoized_entities._legacy_setup_joins,
+ self._memoized_entities[memoized_entities],
+ )
+
+ if query._setup_joins:
+ self._join(query._setup_joins, self._entities)
+
+ if query._legacy_setup_joins:
+ self._legacy_join(query._legacy_setup_joins, self._entities)
+
+ current_adapter = self._get_current_adapter()
+
+ if query._where_criteria:
+ self._where_criteria = query._where_criteria
+
+ if current_adapter:
+ self._where_criteria = tuple(
+ current_adapter(crit, True)
+ for crit in self._where_criteria
+ )
+
+ # TODO: some complexity with order_by here was due to mapper.order_by.
+ # now that this is removed we can hopefully make order_by /
+ # group_by act identically to how they are in Core select.
+ self.order_by = (
+ self._adapt_col_list(query._order_by_clauses, current_adapter)
+ if current_adapter and query._order_by_clauses not in (None, False)
+ else query._order_by_clauses
+ )
+
+ if query._having_criteria:
+ self._having_criteria = tuple(
+ current_adapter(crit, True) if current_adapter else crit
+ for crit in query._having_criteria
+ )
+
+ self.group_by = (
+ self._adapt_col_list(
+ util.flatten_iterator(query._group_by_clauses), current_adapter
+ )
+ if current_adapter and query._group_by_clauses not in (None, False)
+ else query._group_by_clauses or None
+ )
+
+ if self.eager_order_by:
+ adapter = self.from_clauses[0]._target_adapter
+ self.eager_order_by = adapter.copy_and_process(self.eager_order_by)
+
+ if query._distinct_on:
+ self.distinct_on = self._adapt_col_list(
+ query._distinct_on, current_adapter
+ )
+ else:
+ self.distinct_on = ()
+
+ self.distinct = query._distinct
+
+ if query._correlate:
+ # ORM mapped entities that are mapped to joins can be passed
+ # to .correlate, so here they are broken into their component
+ # tables.
+ self.correlate = tuple(
+ util.flatten_iterator(
+ sql_util.surface_selectables(s) if s is not None else None
+ for s in query._correlate
+ )
+ )
+ elif query._correlate_except is not None:
+ self.correlate_except = tuple(
+ util.flatten_iterator(
+ sql_util.surface_selectables(s) if s is not None else None
+ for s in query._correlate_except
+ )
+ )
+ elif not query._auto_correlate:
+ self.correlate = (None,)
+
+ # PART II
+
+ self._for_update_arg = query._for_update_arg
+
+ if self.compile_options._is_star and (len(self._entities) != 1):
+ raise sa_exc.CompileError(
+ "Can't generate ORM query that includes multiple expressions "
+ "at the same time as '*'; query for '*' alone if present"
+ )
+ for entity in self._entities:
+ entity.setup_compile_state(self)
+
+ for rec in self.create_eager_joins:
+ strategy = rec[0]
+ strategy(self, *rec[1:])
+
+        # else "load from discrete FROMs" mode,
+        # i.e. when each _MapperEntity has its own FROM
+
+ if self.compile_options._enable_single_crit:
+ self._adjust_for_extra_criteria()
+
+ if not self.primary_columns:
+ if self.compile_options._only_load_props:
+ raise sa_exc.InvalidRequestError(
+ "No column-based properties specified for "
+ "refresh operation. Use session.expire() "
+ "to reload collections and related items."
+ )
+ else:
+ raise sa_exc.InvalidRequestError(
+ "Query contains no columns with which to SELECT from."
+ )
+
+ if not self.from_clauses:
+ self.from_clauses = list(self._fallback_from_clauses)
+
+ if self.order_by is False:
+ self.order_by = None
+
+ if self.multi_row_eager_loaders and self._should_nest_selectable:
+ self.statement = self._compound_eager_statement()
+ else:
+ self.statement = self._simple_statement()
+
+ if self.for_statement:
+ ezero = self._mapper_zero()
+ if ezero is not None:
+ # TODO: this goes away once we get rid of the deep entity
+ # thing
+ self.statement = self.statement._annotate(
+ {"deepentity": ezero}
+ )
+
+ @classmethod
+ def _create_entities_collection(cls, query, legacy):
+ """Creates a partial ORMSelectCompileState that includes
+ the full collection of _MapperEntity and other _QueryEntity objects.
+
+        Supports a few remaining use cases that are pre-compilation
+        but still need to gather some of the column / adaptation
+        information.
+
+ """
+ self = cls.__new__(cls)
+
+ self._entities = []
+ self._primary_entity = None
+ self._aliased_generations = {}
+ self._polymorphic_adapters = {}
+
+ compile_options = cls.default_compile_options.safe_merge(
+ query._compile_options
+ )
+ # legacy: only for query.with_polymorphic()
+ if compile_options._with_polymorphic_adapt_map:
+ self._with_polymorphic_adapt_map = dict(
+ compile_options._with_polymorphic_adapt_map
+ )
+ self._setup_with_polymorphics()
+
+ self._label_convention = self._column_naming_convention(
+ query._label_style, legacy
+ )
+
+ # entities will also set up polymorphic adapters for mappers
+ # that have with_polymorphic configured
+ _QueryEntity.to_compile_state(
+ self, query._raw_columns, self._entities, is_current_entities=True
+ )
+ return self
+
+ @classmethod
+ def determine_last_joined_entity(cls, statement):
+ setup_joins = statement._setup_joins
+
+ if not setup_joins:
+ return None
+
+ (target, onclause, from_, flags) = setup_joins[-1]
+
+ if isinstance(target, interfaces.PropComparator):
+ return target.entity
+ else:
+ return target
+
+ @classmethod
+ def all_selected_columns(cls, statement):
+ for element in statement._raw_columns:
+ if (
+ element.is_selectable
+ and "entity_namespace" in element._annotations
+ ):
+ ens = element._annotations["entity_namespace"]
+ if not ens.is_mapper and not ens.is_aliased_class:
+ for elem in _select_iterables([element]):
+ yield elem
+ else:
+ for elem in _select_iterables(ens._all_column_expressions):
+ yield elem
+ else:
+ for elem in _select_iterables([element]):
+ yield elem
+
+ @classmethod
+ def get_columns_clause_froms(cls, statement):
+ return cls._normalize_froms(
+ itertools.chain.from_iterable(
+ element._from_objects
+ if "parententity" not in element._annotations
+ else [
+ element._annotations["parententity"].__clause_element__()
+ ]
+ for element in statement._raw_columns
+ )
+ )
+
+ @classmethod
+ @util.preload_module("sqlalchemy.orm.query")
+ def from_statement(cls, statement, from_statement):
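+        # backs usage such as
+        # select(User).from_statement(text("SELECT * FROM user"))
+        # (entity and table names illustrative); the raw columns of the
+        # original select are applied to the rows of the given statement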
+ query = util.preloaded.orm_query
+
+ from_statement = coercions.expect(
+ roles.ReturnsRowsRole,
+ from_statement,
+ apply_propagate_attrs=statement,
+ )
+
+ stmt = query.FromStatement(statement._raw_columns, from_statement)
+
+ stmt.__dict__.update(
+ _with_options=statement._with_options,
+ _with_context_options=statement._with_context_options,
+ _execution_options=statement._execution_options,
+ _propagate_attrs=statement._propagate_attrs,
+ )
+ return stmt
+
+ def _setup_with_polymorphics(self):
+ # legacy: only for query.with_polymorphic()
+ for ext_info, wp in self._with_polymorphic_adapt_map.items():
+ self._mapper_loads_polymorphically_with(ext_info, wp._adapter)
+
+ def _set_select_from_alias(self):
+
+ query = self.select_statement # query
+
+ assert self.compile_options._set_base_alias
+ assert len(query._from_obj) == 1
+
+ adapter = self._get_select_from_alias_from_obj(query._from_obj[0])
+ if adapter:
+ self.compile_options += {"_enable_single_crit": False}
+ self._from_obj_alias = adapter
+
+ def _get_select_from_alias_from_obj(self, from_obj):
+ info = from_obj
+
+ if "parententity" in info._annotations:
+ info = info._annotations["parententity"]
+
+ if hasattr(info, "mapper"):
+ if not info.is_aliased_class:
+ raise sa_exc.ArgumentError(
+ "A selectable (FromClause) instance is "
+ "expected when the base alias is being set."
+ )
+ else:
+ return info._adapter
+
+ elif isinstance(info.selectable, sql.selectable.AliasedReturnsRows):
+ equivs = self._all_equivs()
+ return sql_util.ColumnAdapter(info, equivs)
+ else:
+ return None
+
+ def _mapper_zero(self):
+ """return the Mapper associated with the first QueryEntity."""
+ return self._entities[0].mapper
+
+ def _entity_zero(self):
+ """Return the 'entity' (mapper or AliasedClass) associated
+ with the first QueryEntity, or alternatively the 'select from'
+ entity if specified."""
+
+ for ent in self.from_clauses:
+ if "parententity" in ent._annotations:
+ return ent._annotations["parententity"]
+ for qent in self._entities:
+ if qent.entity_zero:
+ return qent.entity_zero
+
+ return None
+
+ def _only_full_mapper_zero(self, methname):
+ if self._entities != [self._primary_entity]:
+ raise sa_exc.InvalidRequestError(
+ "%s() can only be used against "
+ "a single mapped class." % methname
+ )
+ return self._primary_entity.entity_zero
+
+ def _only_entity_zero(self, rationale=None):
+ if len(self._entities) > 1:
+ raise sa_exc.InvalidRequestError(
+ rationale
+ or "This operation requires a Query "
+ "against a single mapper."
+ )
+ return self._entity_zero()
+
+ def _all_equivs(self):
+ equivs = {}
+
+ for memoized_entities in self._memoized_entities.values():
+ for ent in [
+ ent
+ for ent in memoized_entities
+ if isinstance(ent, _MapperEntity)
+ ]:
+ equivs.update(ent.mapper._equivalent_columns)
+
+ for ent in [
+ ent for ent in self._entities if isinstance(ent, _MapperEntity)
+ ]:
+ equivs.update(ent.mapper._equivalent_columns)
+ return equivs
+
+ def _compound_eager_statement(self):
+ # for eager joins present and LIMIT/OFFSET/DISTINCT,
+ # wrap the query inside a select,
+ # then append eager joins onto that
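+        #
+        # the result is roughly of the form (names illustrative):
+        #
+        #   SELECT anon_1.*, addresses_1.*
+        #   FROM (SELECT users.id AS id, ... FROM users LIMIT :n) AS anon_1
+        #   LEFT OUTER JOIN addresses AS addresses_1
+        #       ON anon_1.id = addresses_1.user_id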
+
+ if self.order_by:
+ # the default coercion for ORDER BY is now the OrderByRole,
+ # which adds an additional post coercion to ByOfRole in that
+ # elements are converted into label references. For the
+ # eager load / subquery wrapping case, we need to un-coerce
+ # the original expressions outside of the label references
+ # in order to have them render.
+ unwrapped_order_by = [
+ elem.element
+ if isinstance(elem, sql.elements._label_reference)
+ else elem
+ for elem in self.order_by
+ ]
+
+ order_by_col_expr = sql_util.expand_column_list_from_order_by(
+ self.primary_columns, unwrapped_order_by
+ )
+ else:
+ order_by_col_expr = []
+ unwrapped_order_by = None
+
+ # put FOR UPDATE on the inner query, where MySQL will honor it,
+ # as well as if it has an OF so PostgreSQL can use it.
+ inner = self._select_statement(
+ self.primary_columns
+ + [c for c in order_by_col_expr if c not in self.dedupe_columns],
+ self.from_clauses,
+ self._where_criteria,
+ self._having_criteria,
+ self.label_style,
+ self.order_by,
+ for_update=self._for_update_arg,
+ hints=self.select_statement._hints,
+ statement_hints=self.select_statement._statement_hints,
+ correlate=self.correlate,
+ correlate_except=self.correlate_except,
+ **self._select_args
+ )
+
+ inner = inner.alias()
+
+ equivs = self._all_equivs()
+
+ self.compound_eager_adapter = sql_util.ColumnAdapter(inner, equivs)
+
+ statement = future.select(
+ *([inner] + self.secondary_columns) # use_labels=self.labels
+ )
+ statement._label_style = self.label_style
+
+        # Oracle, however, does not allow FOR UPDATE on the subquery,
+        # and the Oracle dialect ignores it there; additionally, for
+        # PostgreSQL and MySQL we expect that all elements of the row are
+        # locked, so we also put it on the outside (except for PG when OF
+        # is used)
+ if (
+ self._for_update_arg is not None
+ and self._for_update_arg.of is None
+ ):
+ statement._for_update_arg = self._for_update_arg
+
+ from_clause = inner
+ for eager_join in self.eager_joins.values():
+ # EagerLoader places a 'stop_on' attribute on the join,
+ # giving us a marker as to where the "splice point" of
+ # the join should be
+ from_clause = sql_util.splice_joins(
+ from_clause, eager_join, eager_join.stop_on
+ )
+
+ statement.select_from.non_generative(statement, from_clause)
+
+ if unwrapped_order_by:
+ statement.order_by.non_generative(
+ statement,
+ *self.compound_eager_adapter.copy_and_process(
+ unwrapped_order_by
+ )
+ )
+
+ statement.order_by.non_generative(statement, *self.eager_order_by)
+ return statement
+
+ def _simple_statement(self):
+
+ if (
+ self.compile_options._use_legacy_query_style
+ and (self.distinct and not self.distinct_on)
+ and self.order_by
+ ):
+ to_add = sql_util.expand_column_list_from_order_by(
+ self.primary_columns, self.order_by
+ )
+ if to_add:
+ util.warn_deprecated_20(
+ "ORDER BY columns added implicitly due to "
+ "DISTINCT is deprecated and will be removed in "
+ "SQLAlchemy 2.0. SELECT statements with DISTINCT "
+ "should be written to explicitly include the appropriate "
+ "columns in the columns clause"
+ )
+ self.primary_columns += to_add
+
+ statement = self._select_statement(
+ self.primary_columns + self.secondary_columns,
+ tuple(self.from_clauses) + tuple(self.eager_joins.values()),
+ self._where_criteria,
+ self._having_criteria,
+ self.label_style,
+ self.order_by,
+ for_update=self._for_update_arg,
+ hints=self.select_statement._hints,
+ statement_hints=self.select_statement._statement_hints,
+ correlate=self.correlate,
+ correlate_except=self.correlate_except,
+ **self._select_args
+ )
+
+ if self.eager_order_by:
+ statement.order_by.non_generative(statement, *self.eager_order_by)
+ return statement
+
+ def _select_statement(
+ self,
+ raw_columns,
+ from_obj,
+ where_criteria,
+ having_criteria,
+ label_style,
+ order_by,
+ for_update,
+ hints,
+ statement_hints,
+ correlate,
+ correlate_except,
+ limit_clause,
+ offset_clause,
+ fetch_clause,
+ fetch_clause_options,
+ distinct,
+ distinct_on,
+ prefixes,
+ suffixes,
+ group_by,
+ ):
+
+ Select = future.Select
+ statement = Select._create_raw_select(
+ _raw_columns=raw_columns,
+ _from_obj=from_obj,
+ _label_style=label_style,
+ )
+
+ if where_criteria:
+ statement._where_criteria = where_criteria
+ if having_criteria:
+ statement._having_criteria = having_criteria
+
+ if order_by:
+ statement._order_by_clauses += tuple(order_by)
+
+ if distinct_on:
+ statement.distinct.non_generative(statement, *distinct_on)
+ elif distinct:
+ statement.distinct.non_generative(statement)
+
+ if group_by:
+ statement._group_by_clauses += tuple(group_by)
+
+ statement._limit_clause = limit_clause
+ statement._offset_clause = offset_clause
+ statement._fetch_clause = fetch_clause
+ statement._fetch_clause_options = fetch_clause_options
+
+ if prefixes:
+ statement._prefixes = prefixes
+
+ if suffixes:
+ statement._suffixes = suffixes
+
+ statement._for_update_arg = for_update
+
+ if hints:
+ statement._hints = hints
+ if statement_hints:
+ statement._statement_hints = statement_hints
+
+ if correlate:
+ statement.correlate.non_generative(statement, *correlate)
+
+ if correlate_except is not None:
+ statement.correlate_except.non_generative(
+ statement, *correlate_except
+ )
+
+ return statement
+
+ def _adapt_polymorphic_element(self, element):
+ if "parententity" in element._annotations:
+ search = element._annotations["parententity"]
+ alias = self._polymorphic_adapters.get(search, None)
+ if alias:
+ return alias.adapt_clause(element)
+
+ if isinstance(element, expression.FromClause):
+ search = element
+ elif hasattr(element, "table"):
+ search = element.table
+ else:
+ return None
+
+ alias = self._polymorphic_adapters.get(search, None)
+ if alias:
+ return alias.adapt_clause(element)
+
+ def _adapt_aliased_generation(self, element):
+ # this is crazy logic that I look forward to blowing away
+ # when aliased=True is gone :)
+ if "aliased_generation" in element._annotations:
+ for adapter in self._aliased_generations.get(
+ element._annotations["aliased_generation"], ()
+ ):
+ replaced_elem = adapter.replace(element)
+ if replaced_elem is not None:
+ return replaced_elem
+
+ return None
+
+ def _adapt_col_list(self, cols, current_adapter):
+ if current_adapter:
+ return [current_adapter(o, True) for o in cols]
+ else:
+ return cols
+
+ def _get_current_adapter(self):
+
+ adapters = []
+
+ if self._from_obj_alias:
+ # used for legacy going forward for query set_ops, e.g.
+ # union(), union_all(), etc.
+ # 1.4 and previously, also used for from_self(),
+ # select_entity_from()
+ #
+ # for the "from obj" alias, apply extra rule to the
+ # 'ORM only' check, if this query were generated from a
+ # subquery of itself, i.e. _from_selectable(), apply adaption
+ # to all SQL constructs.
+ adapters.append(
+ (
+ False
+ if self.compile_options._orm_only_from_obj_alias
+ else True,
+ self._from_obj_alias.replace,
+ )
+ )
+
+ # vvvvvvvvvvvvvvv legacy vvvvvvvvvvvvvvvvvv
+ # this can totally go away when we remove join(..., aliased=True)
+ if self._aliased_generations:
+ adapters.append((False, self._adapt_aliased_generation))
+ # ^^^^^^^^^^^^^ legacy ^^^^^^^^^^^^^^^^^^^^^
+
+ # this was *hopefully* the only adapter we were going to need
+ # going forward...however, we unfortunately need _from_obj_alias
+ # for query.union(), which we can't drop
+ if self._polymorphic_adapters:
+ adapters.append((False, self._adapt_polymorphic_element))
+
+ if not adapters:
+ return None
+
+ def _adapt_clause(clause, as_filter):
+ # do we adapt all expression elements or only those
+ # tagged as 'ORM' constructs ?
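+            # e.g. an element carrying "parententity" in its annotations
+            # (an ORM-level construct) is passed to every adapter, while a
+            # plain Core column is passed only to adapters flagged with
+            # always_adapt=True below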
+
+ def replace(elem):
+ is_orm_adapt = (
+ "_orm_adapt" in elem._annotations
+ or "parententity" in elem._annotations
+ )
+ for always_adapt, adapter in adapters:
+ if is_orm_adapt or always_adapt:
+ e = adapter(elem)
+ if e is not None:
+ return e
+
+ return visitors.replacement_traverse(clause, {}, replace)
+
+ return _adapt_clause
+
+ def _join(self, args, entities_collection):
+ for (right, onclause, from_, flags) in args:
+ isouter = flags["isouter"]
+ full = flags["full"]
+ # maybe?
+ self._reset_joinpoint()
+
+ right = inspect(right)
+ if onclause is not None:
+ onclause = inspect(onclause)
+
+ if onclause is None and isinstance(
+ right, interfaces.PropComparator
+ ):
+ # determine onclause/right_entity. still need to think
+ # about how to best organize this since we are getting:
+                #
+ # q.join(Entity, Parent.property)
+ # q.join(Parent.property)
+ # q.join(Parent.property.of_type(Entity))
+ # q.join(some_table)
+ # q.join(some_table, some_parent.c.id==some_table.c.parent_id)
+ #
+ # is this still too many choices? how do we handle this
+ # when sometimes "right" is implied and sometimes not?
+ #
+ onclause = right
+ right = None
+ elif "parententity" in right._annotations:
+ right = right._annotations["parententity"]
+
+ if onclause is None:
+ if not right.is_selectable and not hasattr(right, "mapper"):
+ raise sa_exc.ArgumentError(
+ "Expected mapped entity or "
+ "selectable/table as join target"
+ )
+
+ of_type = None
+
+ if isinstance(onclause, interfaces.PropComparator):
+ # descriptor/property given (or determined); this tells us
+ # explicitly what the expected "left" side of the join is.
+
+ of_type = getattr(onclause, "_of_type", None)
+
+ if right is None:
+ if of_type:
+ right = of_type
+ else:
+ right = onclause.property
+
+ try:
+ right = right.entity
+ except AttributeError as err:
+ util.raise_(
+ sa_exc.ArgumentError(
+ "Join target %s does not refer to a "
+ "mapped entity" % right
+ ),
+ replace_context=err,
+ )
+
+ left = onclause._parententity
+
+ alias = self._polymorphic_adapters.get(left, None)
+
+ # could be None or could be ColumnAdapter also
+ if isinstance(alias, ORMAdapter) and alias.mapper.isa(left):
+ left = alias.aliased_class
+ onclause = getattr(left, onclause.key)
+
+ prop = onclause.property
+ if not isinstance(onclause, attributes.QueryableAttribute):
+ onclause = prop
+
+ # TODO: this is where "check for path already present"
+ # would occur. see if this still applies?
+
+ if from_ is not None:
+ if (
+ from_ is not left
+ and from_._annotations.get("parententity", None)
+ is not left
+ ):
+ raise sa_exc.InvalidRequestError(
+ "explicit from clause %s does not match left side "
+ "of relationship attribute %s"
+ % (
+ from_._annotations.get("parententity", from_),
+ onclause,
+ )
+ )
+ elif from_ is not None:
+ prop = None
+ left = from_
+ else:
+ # no descriptor/property given; we will need to figure out
+ # what the effective "left" side is
+ prop = left = None
+
+ # figure out the final "left" and "right" sides and create an
+ # ORMJoin to add to our _from_obj tuple
+ self._join_left_to_right(
+ entities_collection,
+ left,
+ right,
+ onclause,
+ prop,
+ False,
+ False,
+ isouter,
+ full,
+ )
+
+ def _legacy_join(self, args, entities_collection):
+ """consumes arguments from join() or outerjoin(), places them into a
+ consistent format with which to form the actual JOIN constructs.
+
+ """
+ for (right, onclause, left, flags) in args:
+
+ outerjoin = flags["isouter"]
+ create_aliases = flags["aliased"]
+ from_joinpoint = flags["from_joinpoint"]
+ full = flags["full"]
+ aliased_generation = flags["aliased_generation"]
+
+            # do a quick inspect to accommodate a lambda
+ if right is not None and not isinstance(right, util.string_types):
+ right = inspect(right)
+ if onclause is not None and not isinstance(
+ onclause, util.string_types
+ ):
+ onclause = inspect(onclause)
+
+ # legacy vvvvvvvvvvvvvvvvvvvvvvvvvv
+ if not from_joinpoint:
+ self._reset_joinpoint()
+ else:
+ prev_aliased_generation = self._joinpoint.get(
+ "aliased_generation", None
+ )
+ if not aliased_generation:
+ aliased_generation = prev_aliased_generation
+ elif prev_aliased_generation:
+ self._aliased_generations[
+ aliased_generation
+ ] = self._aliased_generations.get(
+ prev_aliased_generation, ()
+ )
+ # legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ if (
+ isinstance(
+ right, (interfaces.PropComparator, util.string_types)
+ )
+ and onclause is None
+ ):
+ onclause = right
+ right = None
+ elif "parententity" in right._annotations:
+ right = right._annotations["parententity"]
+
+ if onclause is None:
+ if not right.is_selectable and not hasattr(right, "mapper"):
+ raise sa_exc.ArgumentError(
+ "Expected mapped entity or "
+ "selectable/table as join target"
+ )
+
+ if isinstance(onclause, interfaces.PropComparator):
+ of_type = getattr(onclause, "_of_type", None)
+ else:
+ of_type = None
+
+ if isinstance(onclause, util.string_types):
+ # string given, e.g. query(Foo).join("bar").
+ # we look to the left entity or what we last joined
+ # towards
+ onclause = _entity_namespace_key(
+ inspect(self._joinpoint_zero()), onclause
+ )
+
+ # legacy vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
+ # check for q.join(Class.propname, from_joinpoint=True)
+ # and Class corresponds at the mapper level to the current
+ # joinpoint. this match intentionally looks for a non-aliased
+ # class-bound descriptor as the onclause and if it matches the
+ # current joinpoint at the mapper level, it's used. This
+ # is a very old use case that is intended to make it easier
+ # to work with the aliased=True flag, which is also something
+ # that probably shouldn't exist on join() due to its high
+ # complexity/usefulness ratio
+ elif from_joinpoint and isinstance(
+ onclause, interfaces.PropComparator
+ ):
+ jp0 = self._joinpoint_zero()
+ info = inspect(jp0)
+
+ if getattr(info, "mapper", None) is onclause._parententity:
+ onclause = _entity_namespace_key(info, onclause.key)
+ # legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ if isinstance(onclause, interfaces.PropComparator):
+ # descriptor/property given (or determined); this tells us
+ # explicitly what the expected "left" side of the join is.
+ if right is None:
+ if of_type:
+ right = of_type
+ else:
+ right = onclause.property
+
+ try:
+ right = right.entity
+ except AttributeError as err:
+ util.raise_(
+ sa_exc.ArgumentError(
+ "Join target %s does not refer to a "
+ "mapped entity" % right
+ ),
+ replace_context=err,
+ )
+
+ left = onclause._parententity
+
+ alias = self._polymorphic_adapters.get(left, None)
+
+ # could be None or could be ColumnAdapter also
+ if isinstance(alias, ORMAdapter) and alias.mapper.isa(left):
+ left = alias.aliased_class
+ onclause = getattr(left, onclause.key)
+
+ prop = onclause.property
+ if not isinstance(onclause, attributes.QueryableAttribute):
+ onclause = prop
+
+ if not create_aliases:
+ # check for this path already present.
+ # don't render in that case.
+ edge = (left, right, prop.key)
+ if edge in self._joinpoint:
+ # The child's prev reference might be stale --
+ # it could point to a parent older than the
+ # current joinpoint. If this is the case,
+ # then we need to update it and then fix the
+ # tree's spine with _update_joinpoint. Copy
+ # and then mutate the child, which might be
+ # shared by a different query object.
+ jp = self._joinpoint[edge].copy()
+ jp["prev"] = (edge, self._joinpoint)
+ self._update_joinpoint(jp)
+
+ continue
+
+ else:
+ # no descriptor/property given; we will need to figure out
+ # what the effective "left" side is
+ prop = left = None
+
+ # figure out the final "left" and "right" sides and create an
+ # ORMJoin to add to our _from_obj tuple
+ self._join_left_to_right(
+ entities_collection,
+ left,
+ right,
+ onclause,
+ prop,
+ create_aliases,
+ aliased_generation,
+ outerjoin,
+ full,
+ )
+
+ def _joinpoint_zero(self):
+ return self._joinpoint.get("_joinpoint_entity", self._entity_zero())
+
+ def _join_left_to_right(
+ self,
+ entities_collection,
+ left,
+ right,
+ onclause,
+ prop,
+ create_aliases,
+ aliased_generation,
+ outerjoin,
+ full,
+ ):
+ """given raw "left", "right", "onclause" parameters consumed from
+ a particular key within _join(), add a real ORMJoin object to
+ our _from_obj list (or augment an existing one)
+
+ """
+
+ if left is None:
+ # left not given (e.g. no relationship object/name specified)
+ # figure out the best "left" side based on our existing froms /
+ # entities
+ assert prop is None
+ (
+ left,
+ replace_from_obj_index,
+ use_entity_index,
+ ) = self._join_determine_implicit_left_side(
+ entities_collection, left, right, onclause
+ )
+ else:
+ # left is given via a relationship/name, or as explicit left side.
+ # Determine where in our
+ # "froms" list it should be spliced/appended as well as what
+ # existing entity it corresponds to.
+ (
+ replace_from_obj_index,
+ use_entity_index,
+ ) = self._join_place_explicit_left_side(entities_collection, left)
+
+ if left is right and not create_aliases:
+ raise sa_exc.InvalidRequestError(
+ "Can't construct a join from %s to %s, they "
+ "are the same entity" % (left, right)
+ )
+
+ # the right side as given often needs to be adapted. additionally
+ # a lot of things can be wrong with it. handle all that and
+ # get back the new effective "right" side
+ r_info, right, onclause = self._join_check_and_adapt_right_side(
+ left, right, onclause, prop, create_aliases, aliased_generation
+ )
+
+ if not r_info.is_selectable:
+ extra_criteria = self._get_extra_criteria(r_info)
+ else:
+ extra_criteria = ()
+
+ if replace_from_obj_index is not None:
+ # splice into an existing element in the
+ # self._from_obj list
+ left_clause = self.from_clauses[replace_from_obj_index]
+
+ self.from_clauses = (
+ self.from_clauses[:replace_from_obj_index]
+ + [
+ _ORMJoin(
+ left_clause,
+ right,
+ onclause,
+ isouter=outerjoin,
+ full=full,
+ _extra_criteria=extra_criteria,
+ )
+ ]
+ + self.from_clauses[replace_from_obj_index + 1 :]
+ )
+ else:
+ # add a new element to the self._from_obj list
+ if use_entity_index is not None:
+ # make use of _MapperEntity selectable, which is usually
+ # entity_zero.selectable, but if with_polymorphic() were used
+ # might be distinct
+ assert isinstance(
+ entities_collection[use_entity_index], _MapperEntity
+ )
+ left_clause = entities_collection[use_entity_index].selectable
+ else:
+ left_clause = left
+
+ self.from_clauses = self.from_clauses + [
+ _ORMJoin(
+ left_clause,
+ r_info,
+ onclause,
+ isouter=outerjoin,
+ full=full,
+ _extra_criteria=extra_criteria,
+ )
+ ]
+
+ def _join_determine_implicit_left_side(
+ self, entities_collection, left, right, onclause
+ ):
+ """When join conditions don't express the left side explicitly,
+ determine if an existing FROM or entity in this query
+ can serve as the left hand side.
+
+ """
+
+ # when we are here, it means join() was called without an ORM-
+ # specific way of telling us what the "left" side is, e.g.:
+ #
+ # join(RightEntity)
+ #
+ # or
+ #
+ # join(RightEntity, RightEntity.foo == LeftEntity.bar)
+ #
+
+ r_info = inspect(right)
+
+ replace_from_obj_index = use_entity_index = None
+
+ if self.from_clauses:
+ # we have a list of FROMs already. So by definition this
+ # join has to connect to one of those FROMs.
+
+ indexes = sql_util.find_left_clause_to_join_from(
+ self.from_clauses, r_info.selectable, onclause
+ )
+
+ if len(indexes) == 1:
+ replace_from_obj_index = indexes[0]
+ left = self.from_clauses[replace_from_obj_index]
+ elif len(indexes) > 1:
+ raise sa_exc.InvalidRequestError(
+ "Can't determine which FROM clause to join "
+ "from, there are multiple FROMS which can "
+ "join to this entity. Please use the .select_from() "
+ "method to establish an explicit left side, as well as "
+ "providing an explicit ON clause if not present already "
+ "to help resolve the ambiguity."
+ )
+ else:
+ raise sa_exc.InvalidRequestError(
+ "Don't know how to join to %r. "
+ "Please use the .select_from() "
+ "method to establish an explicit left side, as well as "
+ "providing an explicit ON clause if not present already "
+ "to help resolve the ambiguity." % (right,)
+ )
+
+ elif entities_collection:
+ # we have no explicit FROMs, so the implicit left has to
+ # come from our list of entities.
+
+ potential = {}
+ for entity_index, ent in enumerate(entities_collection):
+ entity = ent.entity_zero_or_selectable
+ if entity is None:
+ continue
+ ent_info = inspect(entity)
+ if ent_info is r_info: # left and right are the same, skip
+ continue
+
+ # by using a dictionary with the selectables as keys this
+ # de-duplicates those selectables as occurs when the query is
+ # against a series of columns from the same selectable
+ if isinstance(ent, _MapperEntity):
+ potential[ent.selectable] = (entity_index, entity)
+ else:
+ potential[ent_info.selectable] = (None, entity)
+
+ all_clauses = list(potential.keys())
+ indexes = sql_util.find_left_clause_to_join_from(
+ all_clauses, r_info.selectable, onclause
+ )
+
+ if len(indexes) == 1:
+ use_entity_index, left = potential[all_clauses[indexes[0]]]
+ elif len(indexes) > 1:
+ raise sa_exc.InvalidRequestError(
+ "Can't determine which FROM clause to join "
+ "from, there are multiple FROMS which can "
+ "join to this entity. Please use the .select_from() "
+ "method to establish an explicit left side, as well as "
+ "providing an explicit ON clause if not present already "
+ "to help resolve the ambiguity."
+ )
+ else:
+ raise sa_exc.InvalidRequestError(
+ "Don't know how to join to %r. "
+ "Please use the .select_from() "
+ "method to establish an explicit left side, as well as "
+ "providing an explicit ON clause if not present already "
+ "to help resolve the ambiguity." % (right,)
+ )
+ else:
+ raise sa_exc.InvalidRequestError(
+ "No entities to join from; please use "
+ "select_from() to establish the left "
+ "entity/selectable of this join"
+ )
+
+ return left, replace_from_obj_index, use_entity_index
+
+ def _join_place_explicit_left_side(self, entities_collection, left):
+        """When join conditions express a left side explicitly, determine
+        where in our existing list of FROM clauses we should join towards,
+        or whether we need to make a new join, and if so, whether it
+        proceeds from one of our existing entities.
+
+ """
+
+ # when we are here, it means join() was called with an indicator
+ # as to an exact left side, which means a path to a
+ # RelationshipProperty was given, e.g.:
+ #
+ # join(RightEntity, LeftEntity.right)
+ #
+ # or
+ #
+ # join(LeftEntity.right)
+ #
+ # as well as string forms:
+ #
+ # join(RightEntity, "right")
+ #
+ # etc.
+ #
+
+ replace_from_obj_index = use_entity_index = None
+
+ l_info = inspect(left)
+ if self.from_clauses:
+ indexes = sql_util.find_left_clause_that_matches_given(
+ self.from_clauses, l_info.selectable
+ )
+
+ if len(indexes) > 1:
+ raise sa_exc.InvalidRequestError(
+ "Can't identify which entity in which to assign the "
+ "left side of this join. Please use a more specific "
+ "ON clause."
+ )
+
+ # have an index, means the left side is already present in
+ # an existing FROM in the self._from_obj tuple
+ if indexes:
+ replace_from_obj_index = indexes[0]
+
+ # no index, means we need to add a new element to the
+ # self._from_obj tuple
+
+ # no from element present, so we will have to add to the
+ # self._from_obj tuple. Determine if this left side matches up
+ # with existing mapper entities, in which case we want to apply the
+ # aliasing / adaptation rules present on that entity if any
+ if (
+ replace_from_obj_index is None
+ and entities_collection
+ and hasattr(l_info, "mapper")
+ ):
+ for idx, ent in enumerate(entities_collection):
+ # TODO: should we be checking for multiple mapper entities
+ # matching?
+ if isinstance(ent, _MapperEntity) and ent.corresponds_to(left):
+ use_entity_index = idx
+ break
+
+ return replace_from_obj_index, use_entity_index
+
+ def _join_check_and_adapt_right_side(
+ self, left, right, onclause, prop, create_aliases, aliased_generation
+ ):
+        """transform the "right" side of the join as well as the onclause
+        according to polymorphic mapping translations, aliasing on the
+        query or on the join, and special cases where the right and left
+        sides have overlapping tables.
+
+ """
+
+ l_info = inspect(left)
+ r_info = inspect(right)
+
+ overlap = False
+ if not create_aliases:
+ right_mapper = getattr(r_info, "mapper", None)
+ # if the target is a joined inheritance mapping,
+ # be more liberal about auto-aliasing.
+ if right_mapper and (
+ right_mapper.with_polymorphic
+ or isinstance(right_mapper.persist_selectable, expression.Join)
+ ):
+ for from_obj in self.from_clauses or [l_info.selectable]:
+ if sql_util.selectables_overlap(
+ l_info.selectable, from_obj
+ ) and sql_util.selectables_overlap(
+ from_obj, r_info.selectable
+ ):
+ overlap = True
+ break
+
+ if (
+ overlap or not create_aliases
+ ) and l_info.selectable is r_info.selectable:
+ raise sa_exc.InvalidRequestError(
+ "Can't join table/selectable '%s' to itself"
+ % l_info.selectable
+ )
+
+ right_mapper, right_selectable, right_is_aliased = (
+ getattr(r_info, "mapper", None),
+ r_info.selectable,
+ getattr(r_info, "is_aliased_class", False),
+ )
+
+ if (
+ right_mapper
+ and prop
+ and not right_mapper.common_parent(prop.mapper)
+ ):
+ raise sa_exc.InvalidRequestError(
+ "Join target %s does not correspond to "
+ "the right side of join condition %s" % (right, onclause)
+ )
+
+ # _join_entities is used as a hint for single-table inheritance
+ # purposes at the moment
+ if hasattr(r_info, "mapper"):
+ self._join_entities += (r_info,)
+
+ need_adapter = False
+
+ # test for joining to an unmapped selectable as the target
+ if r_info.is_clause_element:
+
+ if prop:
+ right_mapper = prop.mapper
+
+ if right_selectable._is_lateral:
+                # orm_only is disabled to suit the case where we have to
+                # adapt an explicit correlate(Entity) - the select() loses
+                # the ORM-ness in this case right now; ideally it would not
+ current_adapter = self._get_current_adapter()
+ if current_adapter is not None:
+ # TODO: we had orm_only=False here before, removing
+ # it didn't break things. if we identify the rationale,
+ # may need to apply "_orm_only" annotation here.
+ right = current_adapter(right, True)
+
+ elif prop:
+ # joining to selectable with a mapper property given
+ # as the ON clause
+
+ if not right_selectable.is_derived_from(
+ right_mapper.persist_selectable
+ ):
+ raise sa_exc.InvalidRequestError(
+ "Selectable '%s' is not derived from '%s'"
+ % (
+ right_selectable.description,
+ right_mapper.persist_selectable.description,
+ )
+ )
+
+ # if the destination selectable is a plain select(),
+ # turn it into an alias().
+ if isinstance(right_selectable, expression.SelectBase):
+ right_selectable = coercions.expect(
+ roles.FromClauseRole, right_selectable
+ )
+ need_adapter = True
+
+ # make the right hand side target into an ORM entity
+ right = aliased(right_mapper, right_selectable)
+
+ util.warn_deprecated(
+ "An alias is being generated automatically against "
+ "joined entity %s for raw clauseelement, which is "
+ "deprecated and will be removed in a later release. "
+ "Use the aliased() "
+ "construct explicitly, see the linked example."
+ % right_mapper,
+ "1.4",
+ code="xaj1",
+ )
+
+ elif create_aliases:
+ # it *could* work, but it doesn't right now and I'd rather
+ # get rid of aliased=True completely
+ raise sa_exc.InvalidRequestError(
+ "The aliased=True parameter on query.join() only works "
+ "with an ORM entity, not a plain selectable, as the "
+ "target."
+ )
+
+ # test for overlap:
+ # orm/inheritance/relationships.py
+ # SelfReferentialM2MTest
+ aliased_entity = right_mapper and not right_is_aliased and overlap
+
+ if not need_adapter and (create_aliases or aliased_entity):
+ # there are a few places in the ORM that automatic aliasing
+ # is still desirable, and can't be automatic with a Core
+ # only approach. For illustrations of "overlaps" see
+ # test/orm/inheritance/test_relationships.py. There are also
+ # general overlap cases with many-to-many tables where automatic
+ # aliasing is desirable.
+ right = aliased(right, flat=True)
+ need_adapter = True
+
+ if not create_aliases:
+ util.warn(
+ "An alias is being generated automatically against "
+ "joined entity %s due to overlapping tables. This is a "
+ "legacy pattern which may be "
+ "deprecated in a later release. Use the "
+ "aliased(<entity>, flat=True) "
+ "construct explicitly, see the linked example."
+ % right_mapper,
+ code="xaj2",
+ )
+
+ if need_adapter:
+ assert right_mapper
+
+ adapter = ORMAdapter(
+ right, equivalents=right_mapper._equivalent_columns
+ )
+
+ # if an alias() on the right side was generated,
+            # which is intended to wrap the right side in a subquery,
+ # ensure that columns retrieved from this target in the result
+ # set are also adapted.
+ if not create_aliases:
+ self._mapper_loads_polymorphically_with(right_mapper, adapter)
+ elif aliased_generation:
+ adapter._debug = True
+ self._aliased_generations[aliased_generation] = (
+ adapter,
+ ) + self._aliased_generations.get(aliased_generation, ())
+ elif (
+ not r_info.is_clause_element
+ and not right_is_aliased
+ and right_mapper.with_polymorphic
+ and isinstance(
+ right_mapper._with_polymorphic_selectable,
+ expression.AliasedReturnsRows,
+ )
+ ):
+ # for the case where the target mapper has a with_polymorphic
+ # set up, ensure an adapter is set up for criteria that works
+ # against this mapper. Previously, this logic used to
+ # use the "create_aliases or aliased_entity" case to generate
+ # an aliased() object, but this creates an alias that isn't
+ # strictly necessary.
+ # see test/orm/test_core_compilation.py
+ # ::RelNaturalAliasedJoinsTest::test_straight
+ # and similar
+ self._mapper_loads_polymorphically_with(
+ right_mapper,
+ sql_util.ColumnAdapter(
+ right_mapper.selectable,
+ right_mapper._equivalent_columns,
+ ),
+ )
+ # if the onclause is a ClauseElement, adapt it with any
+ # adapters that are in place right now
+ if isinstance(onclause, expression.ClauseElement):
+ current_adapter = self._get_current_adapter()
+ if current_adapter:
+ onclause = current_adapter(onclause, True)
+
+ # if joining on a MapperProperty path,
+ # track the path to prevent redundant joins
+ if not create_aliases and prop:
+ self._update_joinpoint(
+ {
+ "_joinpoint_entity": right,
+ "prev": ((left, right, prop.key), self._joinpoint),
+ "aliased_generation": aliased_generation,
+ }
+ )
+ else:
+ self._joinpoint = {
+ "_joinpoint_entity": right,
+ "aliased_generation": aliased_generation,
+ }
+
+ return inspect(right), right, onclause
+
+ def _update_joinpoint(self, jp):
+ self._joinpoint = jp
+ # copy backwards to the root of the _joinpath
+ # dict, so that no existing dict in the path is mutated
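+        # e.g. when jp carries prev == (edge, root), the loop rebuilds
+        # root as a copy that stores a copy of this jp under that edge, so
+        # a dict shared with another query object is never mutated in place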
+ while "prev" in jp:
+ f, prev = jp["prev"]
+ prev = dict(prev)
+ prev[f] = jp.copy()
+ jp["prev"] = (f, prev)
+ jp = prev
+ self._joinpath = jp
+
+ def _reset_joinpoint(self):
+ self._joinpoint = self._joinpath
+
+ @property
+ def _select_args(self):
+ return {
+ "limit_clause": self.select_statement._limit_clause,
+ "offset_clause": self.select_statement._offset_clause,
+ "distinct": self.distinct,
+ "distinct_on": self.distinct_on,
+ "prefixes": self.select_statement._prefixes,
+ "suffixes": self.select_statement._suffixes,
+ "group_by": self.group_by or None,
+ "fetch_clause": self.select_statement._fetch_clause,
+ "fetch_clause_options": (
+ self.select_statement._fetch_clause_options
+ ),
+ }
+
+ @property
+ def _should_nest_selectable(self):
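+        # e.g. joinedload() combined with LIMIT must nest: the limit has
+        # to apply to the parent rows in a subquery, not to the joined
+        # collection rows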
+ kwargs = self._select_args
+ return (
+ kwargs.get("limit_clause") is not None
+ or kwargs.get("offset_clause") is not None
+ or kwargs.get("distinct", False)
+ or kwargs.get("distinct_on", ())
+ or kwargs.get("group_by", False)
+ )
+
+ def _get_extra_criteria(self, ext_info):
+ if (
+ "additional_entity_criteria",
+ ext_info.mapper,
+ ) in self.global_attributes:
+ return tuple(
+ ae._resolve_where_criteria(ext_info)
+ for ae in self.global_attributes[
+ ("additional_entity_criteria", ext_info.mapper)
+ ]
+ if (ae.include_aliases or ae.entity is ext_info)
+ and ae._should_include(self)
+ )
+ else:
+ return ()
+
+ def _adjust_for_extra_criteria(self):
+ """Apply extra criteria filtering.
+
+ For all distinct single-table-inheritance mappers represented in
+ the columns clause of this query, as well as the "select from entity",
+        add criteria to the WHERE clause of the given QueryContext such
+        that only the appropriate subtypes are selected from the total
+        results.
+
+ Additionally, add WHERE criteria originating from LoaderCriteriaOptions
+ associated with the global context.
+
+ """
+
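+        # e.g. for an illustrative single-table inheritance setup where
+        # Manager is mapped to an "employee" table with a discriminator
+        # column, a query against Manager gains criteria along the lines
+        # of "employee.type IN ('manager')" here
+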
+ for fromclause in self.from_clauses:
+ ext_info = fromclause._annotations.get("parententity", None)
+ if (
+ ext_info
+ and (
+ ext_info.mapper._single_table_criterion is not None
+ or ("additional_entity_criteria", ext_info.mapper)
+ in self.global_attributes
+ )
+ and ext_info not in self.extra_criteria_entities
+ ):
+
+ self.extra_criteria_entities[ext_info] = (
+ ext_info,
+ ext_info._adapter if ext_info.is_aliased_class else None,
+ )
+
+ search = set(self.extra_criteria_entities.values())
+
+ for (ext_info, adapter) in search:
+ if ext_info in self._join_entities:
+ continue
+
+ single_crit = ext_info.mapper._single_table_criterion
+
+ if self.compile_options._for_refresh_state:
+ additional_entity_criteria = []
+ else:
+ additional_entity_criteria = self._get_extra_criteria(ext_info)
+
+ if single_crit is not None:
+ additional_entity_criteria += (single_crit,)
+
+ current_adapter = self._get_current_adapter()
+ for crit in additional_entity_criteria:
+ if adapter:
+ crit = adapter.traverse(crit)
+
+ if current_adapter:
+ crit = sql_util._deep_annotate(crit, {"_orm_adapt": True})
+ crit = current_adapter(crit, False)
+ self._where_criteria += (crit,)
+
+
+def _column_descriptions(
+ query_or_select_stmt, compile_state=None, legacy=False
+):
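+    # backs the public .column_descriptions accessor; for a statement
+    # like select(User, User.id), the result resembles the sketch:
+    #
+    #   [{"name": "User", "type": User, "aliased": False, ...},
+    #    {"name": "id", "type": Integer(), ...}]
+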
+ if compile_state is None:
+ compile_state = ORMSelectCompileState._create_entities_collection(
+ query_or_select_stmt, legacy=legacy
+ )
+ ctx = compile_state
+ return [
+ {
+ "name": ent._label_name,
+ "type": ent.type,
+ "aliased": getattr(insp_ent, "is_aliased_class", False),
+ "expr": ent.expr,
+ "entity": getattr(insp_ent, "entity", None)
+ if ent.entity_zero is not None and not insp_ent.is_clause_element
+ else None,
+ }
+ for ent, insp_ent in [
+ (
+ _ent,
+ (
+ inspect(_ent.entity_zero)
+ if _ent.entity_zero is not None
+ else None
+ ),
+ )
+ for _ent in ctx._entities
+ ]
+ ]
+
+
+def _legacy_filter_by_entity_zero(query_or_augmented_select):
+ self = query_or_augmented_select
+ if self._legacy_setup_joins:
+ _last_joined_entity = self._last_joined_entity
+ if _last_joined_entity is not None:
+ return _last_joined_entity
+
+ if self._from_obj and "parententity" in self._from_obj[0]._annotations:
+ return self._from_obj[0]._annotations["parententity"]
+
+ return _entity_from_pre_ent_zero(self)
+
+
+def _entity_from_pre_ent_zero(query_or_augmented_select):
+ self = query_or_augmented_select
+ if not self._raw_columns:
+ return None
+
+ ent = self._raw_columns[0]
+
+ if "parententity" in ent._annotations:
+ return ent._annotations["parententity"]
+ elif isinstance(ent, ORMColumnsClauseRole):
+ return ent.entity
+ elif "bundle" in ent._annotations:
+ return ent._annotations["bundle"]
+ else:
+ return ent
+
+
+def _legacy_determine_last_joined_entity(setup_joins, entity_zero):
+ """given the legacy_setup_joins collection at a point in time,
+ figure out what the "filter by entity" would be in terms
+ of those joins.
+
+ in 2.0 this logic should hopefully be much simpler as there will
+ be far fewer ways to specify joins with the ORM
+
+ """
+
+ if not setup_joins:
+ return entity_zero
+
+ # CAN BE REMOVED IN 2.0:
+ # 1. from_joinpoint
+ # 2. aliased_generation
+ # 3. aliased
+ # 4. any treating of prop as str
+ # 5. tuple madness
+ # 6. won't need recursive call anymore without #4
+ # 7. therefore can pass in just the last setup_joins record,
+ # don't need entity_zero
+
+ (right, onclause, left_, flags) = setup_joins[-1]
+
+ from_joinpoint = flags["from_joinpoint"]
+
+ if onclause is None and isinstance(
+ right, (str, interfaces.PropComparator)
+ ):
+ onclause = right
+ right = None
+
+ if right is not None and "parententity" in right._annotations:
+ right = right._annotations["parententity"].entity
+
+ if right is not None:
+ last_entity = right
+ insp = inspect(last_entity)
+ if insp.is_clause_element or insp.is_aliased_class or insp.is_mapper:
+ return insp
+
+ last_entity = onclause
+ if isinstance(last_entity, interfaces.PropComparator):
+ return last_entity.entity
+
+ # legacy vvvvvvvvvvvvvvvvvvvvvvvvvvv
+ if isinstance(onclause, str):
+ if from_joinpoint:
+ prev = _legacy_determine_last_joined_entity(
+ setup_joins[0:-1], entity_zero
+ )
+ else:
+ prev = entity_zero
+
+ if prev is None:
+ return None
+
+ prev = inspect(prev)
+ attr = getattr(prev.entity, onclause, None)
+ if attr is not None:
+ return attr.property.entity
+ # legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ return None
+
+
+class _QueryEntity(object):
+ """represent an entity column returned within a Query result."""
+
+ __slots__ = ()
+
+ _non_hashable_value = False
+ _null_column_type = False
+ use_id_for_hash = False
+
+ @classmethod
+ def to_compile_state(
+ cls, compile_state, entities, entities_collection, is_current_entities
+ ):
+
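+        # each incoming element is dispatched to a _MapperEntity,
+        # _BundleEntity, or _ColumnEntity variant; e.g. a statement like
+        # select(User, User.id, some_bundle) (names illustrative) produces
+        # one of each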
+ for idx, entity in enumerate(entities):
+ if entity._is_lambda_element:
+ if entity._is_sequence:
+ cls.to_compile_state(
+ compile_state,
+ entity._resolved,
+ entities_collection,
+ is_current_entities,
+ )
+ continue
+ else:
+ entity = entity._resolved
+
+ if entity.is_clause_element:
+ if entity.is_selectable:
+ if "parententity" in entity._annotations:
+ _MapperEntity(
+ compile_state,
+ entity,
+ entities_collection,
+ is_current_entities,
+ )
+ else:
+ _ColumnEntity._for_columns(
+ compile_state,
+ entity._select_iterable,
+ entities_collection,
+ idx,
+ is_current_entities,
+ )
+ else:
+ if entity._annotations.get("bundle", False):
+ _BundleEntity(
+ compile_state,
+ entity,
+ entities_collection,
+ is_current_entities,
+ )
+ elif entity._is_clause_list:
+ # this is legacy only - test_composites.py
+ # test_query_cols_legacy
+ _ColumnEntity._for_columns(
+ compile_state,
+ entity._select_iterable,
+ entities_collection,
+ idx,
+ is_current_entities,
+ )
+ else:
+ _ColumnEntity._for_columns(
+ compile_state,
+ [entity],
+ entities_collection,
+ idx,
+ is_current_entities,
+ )
+            elif entity.is_bundle:
+                _BundleEntity(
+                    compile_state,
+                    entity,
+                    entities_collection,
+                    is_current_entities,
+                )
+
+ return entities_collection
+
+
+class _MapperEntity(_QueryEntity):
+ """mapper/class/AliasedClass entity"""
+
+ __slots__ = (
+ "expr",
+ "mapper",
+ "entity_zero",
+ "is_aliased_class",
+ "path",
+ "_extra_entities",
+ "_label_name",
+ "_with_polymorphic_mappers",
+ "selectable",
+ "_polymorphic_discriminator",
+ )
+
+ def __init__(
+ self, compile_state, entity, entities_collection, is_current_entities
+ ):
+ entities_collection.append(self)
+ if is_current_entities:
+ if compile_state._primary_entity is None:
+ compile_state._primary_entity = self
+ compile_state._has_mapper_entities = True
+ compile_state._has_orm_entities = True
+
+ entity = entity._annotations["parententity"]
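+        # attribute access for its side effect: _post_inspect triggers
+        # deferred mapper configuration if it has not yet taken place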
+ entity._post_inspect
+ ext_info = self.entity_zero = entity
+ entity = ext_info.entity
+
+ self.expr = entity
+ self.mapper = mapper = ext_info.mapper
+
+ self._extra_entities = (self.expr,)
+
+ if ext_info.is_aliased_class:
+ self._label_name = ext_info.name
+ else:
+ self._label_name = mapper.class_.__name__
+
+ self.is_aliased_class = ext_info.is_aliased_class
+ self.path = ext_info._path_registry
+
+ if ext_info in compile_state._with_polymorphic_adapt_map:
+            # this codepath occurs only if query.with_polymorphic() was
+            # used
+
+ wp = inspect(compile_state._with_polymorphic_adapt_map[ext_info])
+
+ if self.is_aliased_class:
+ # TODO: invalidrequest ?
+ raise NotImplementedError(
+ "Can't use with_polymorphic() against an Aliased object"
+ )
+
+ mappers, from_obj = mapper._with_polymorphic_args(
+ wp.with_polymorphic_mappers, wp.selectable
+ )
+
+ self._with_polymorphic_mappers = mappers
+ self.selectable = from_obj
+ self._polymorphic_discriminator = wp.polymorphic_on
+
+ else:
+ self.selectable = ext_info.selectable
+ self._with_polymorphic_mappers = ext_info.with_polymorphic_mappers
+ self._polymorphic_discriminator = ext_info.polymorphic_on
+
+ if (
+ mapper.with_polymorphic
+ # controversy - only if inheriting mapper is also
+ # polymorphic?
+ # or (mapper.inherits and mapper.inherits.with_polymorphic)
+ or mapper.inherits
+ or mapper._requires_row_aliasing
+ ):
+ compile_state._create_with_polymorphic_adapter(
+ ext_info, self.selectable
+ )
+
+ supports_single_entity = True
+
+ _non_hashable_value = True
+ use_id_for_hash = True
+
+ @property
+ def type(self):
+ return self.mapper.class_
+
+ @property
+ def entity_zero_or_selectable(self):
+ return self.entity_zero
+
+ def corresponds_to(self, entity):
+ return _entity_corresponds_to(self.entity_zero, entity)
+
+ def _get_entity_clauses(self, compile_state):
+
+ adapter = None
+
+ if not self.is_aliased_class:
+ if compile_state._polymorphic_adapters:
+ adapter = compile_state._polymorphic_adapters.get(
+ self.mapper, None
+ )
+ else:
+ adapter = self.entity_zero._adapter
+
+ if adapter:
+ if compile_state._from_obj_alias:
+ ret = adapter.wrap(compile_state._from_obj_alias)
+ else:
+ ret = adapter
+ else:
+ ret = compile_state._from_obj_alias
+
+ return ret
+
+ def row_processor(self, context, result):
+ compile_state = context.compile_state
+ adapter = self._get_entity_clauses(compile_state)
+
+ if compile_state.compound_eager_adapter and adapter:
+ adapter = adapter.wrap(compile_state.compound_eager_adapter)
+ elif not adapter:
+ adapter = compile_state.compound_eager_adapter
+
+ if compile_state._primary_entity is self:
+ only_load_props = compile_state.compile_options._only_load_props
+ refresh_state = context.refresh_state
+ else:
+ only_load_props = refresh_state = None
+
+ _instance = loading._instance_processor(
+ self,
+ self.mapper,
+ context,
+ result,
+ self.path,
+ adapter,
+ only_load_props=only_load_props,
+ refresh_state=refresh_state,
+ polymorphic_discriminator=self._polymorphic_discriminator,
+ )
+
+ return _instance, self._label_name, self._extra_entities
+
+ def setup_compile_state(self, compile_state):
+
+ adapter = self._get_entity_clauses(compile_state)
+
+ single_table_crit = self.mapper._single_table_criterion
+ if (
+ single_table_crit is not None
+ or ("additional_entity_criteria", self.mapper)
+ in compile_state.global_attributes
+ ):
+ ext_info = self.entity_zero
+ compile_state.extra_criteria_entities[ext_info] = (
+ ext_info,
+ ext_info._adapter if ext_info.is_aliased_class else None,
+ )
+
+ loading._setup_entity_query(
+ compile_state,
+ self.mapper,
+ self,
+ self.path,
+ adapter,
+ compile_state.primary_columns,
+ with_polymorphic=self._with_polymorphic_mappers,
+ only_load_props=compile_state.compile_options._only_load_props,
+ polymorphic_discriminator=self._polymorphic_discriminator,
+ )
+
+ compile_state._fallback_from_clauses.append(self.selectable)
+
+
+class _BundleEntity(_QueryEntity):
+
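+    # a Bundle groups column expressions under a single result key,
+    # e.g. (names illustrative):
+    #
+    #   b = Bundle("b", User.id, User.name)
+    #   row = session.execute(select(b)).first()
+    #   row.b.id, row.b.name
+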
+ _extra_entities = ()
+
+ __slots__ = (
+ "bundle",
+ "expr",
+ "type",
+ "_label_name",
+ "_entities",
+ "supports_single_entity",
+ )
+
+ def __init__(
+ self,
+ compile_state,
+ expr,
+ entities_collection,
+ is_current_entities,
+ setup_entities=True,
+ parent_bundle=None,
+ ):
+ compile_state._has_orm_entities = True
+
+ expr = expr._annotations["bundle"]
+ if parent_bundle:
+ parent_bundle._entities.append(self)
+ else:
+ entities_collection.append(self)
+
+ if isinstance(
+ expr, (attributes.QueryableAttribute, interfaces.PropComparator)
+ ):
+ bundle = expr.__clause_element__()
+ else:
+ bundle = expr
+
+ self.bundle = self.expr = bundle
+ self.type = type(bundle)
+ self._label_name = bundle.name
+ self._entities = []
+
+ if setup_entities:
+ for expr in bundle.exprs:
+ if "bundle" in expr._annotations:
+ _BundleEntity(
+ compile_state,
+ expr,
+ entities_collection,
+ is_current_entities,
+ parent_bundle=self,
+ )
+ elif isinstance(expr, Bundle):
+ _BundleEntity(
+ compile_state,
+ expr,
+ entities_collection,
+ is_current_entities,
+ parent_bundle=self,
+ )
+ else:
+ _ORMColumnEntity._for_columns(
+ compile_state,
+ [expr],
+ entities_collection,
+ None,
+ is_current_entities,
+ parent_bundle=self,
+ )
+
+ self.supports_single_entity = self.bundle.single_entity
+ if (
+ self.supports_single_entity
+ and not compile_state.compile_options._use_legacy_query_style
+ ):
+ util.warn_deprecated_20(
+ "The Bundle.single_entity flag has no effect when "
+ "using 2.0 style execution."
+ )
+
+ @property
+ def mapper(self):
+ ezero = self.entity_zero
+ if ezero is not None:
+ return ezero.mapper
+ else:
+ return None
+
+ @property
+ def entity_zero(self):
+ for ent in self._entities:
+ ezero = ent.entity_zero
+ if ezero is not None:
+ return ezero
+ else:
+ return None
+
+ def corresponds_to(self, entity):
+ # TODO: we might be able to implement this but for now
+ # we are working around it
+ return False
+
+ @property
+ def entity_zero_or_selectable(self):
+ for ent in self._entities:
+ ezero = ent.entity_zero_or_selectable
+ if ezero is not None:
+ return ezero
+ else:
+ return None
+
+ def setup_compile_state(self, compile_state):
+ for ent in self._entities:
+ ent.setup_compile_state(compile_state)
+
+ def row_processor(self, context, result):
+ procs, labels, extra = zip(
+ *[ent.row_processor(context, result) for ent in self._entities]
+ )
+
+ proc = self.bundle.create_row_processor(context.query, procs, labels)
+
+ return proc, self._label_name, self._extra_entities
+
+
+class _ColumnEntity(_QueryEntity):
+ __slots__ = (
+ "_fetch_column",
+ "_row_processor",
+ "raw_column_index",
+ "translate_raw_column",
+ )
+
+ @classmethod
+ def _for_columns(
+ cls,
+ compile_state,
+ columns,
+ entities_collection,
+ raw_column_index,
+ is_current_entities,
+ parent_bundle=None,
+ ):
+ for column in columns:
+ annotations = column._annotations
+ if "parententity" in annotations:
+ _entity = annotations["parententity"]
+ else:
+ _entity = sql_util.extract_first_column_annotation(
+ column, "parententity"
+ )
+
+ if _entity:
+ if "identity_token" in column._annotations:
+ _IdentityTokenEntity(
+ compile_state,
+ column,
+ entities_collection,
+ _entity,
+ raw_column_index,
+ is_current_entities,
+ parent_bundle=parent_bundle,
+ )
+ else:
+ _ORMColumnEntity(
+ compile_state,
+ column,
+ entities_collection,
+ _entity,
+ raw_column_index,
+ is_current_entities,
+ parent_bundle=parent_bundle,
+ )
+ else:
+ _RawColumnEntity(
+ compile_state,
+ column,
+ entities_collection,
+ raw_column_index,
+ is_current_entities,
+ parent_bundle=parent_bundle,
+ )
+
+ @property
+ def type(self):
+ return self.column.type
+
+ @property
+ def _non_hashable_value(self):
+ return not self.column.type.hashable
+
+ @property
+ def _null_column_type(self):
+ return self.column.type._isnull
+
+ def row_processor(self, context, result):
+ compile_state = context.compile_state
+
+ # the resulting callable is entirely cacheable so just return
+ # it if we already made one
+ if self._row_processor is not None:
+ getter, label_name, extra_entities = self._row_processor
+ if self.translate_raw_column:
+ extra_entities += (
+ result.context.invoked_statement._raw_columns[
+ self.raw_column_index
+ ],
+ )
+
+ return getter, label_name, extra_entities
+
+ # retrieve the column that would have been set up in
+ # setup_compile_state, to avoid doing redundant work
+ if self._fetch_column is not None:
+ column = self._fetch_column
+ else:
+ # fetch_column will be None when we are doing a from_statement
+ # and setup_compile_state may not have been called.
+ column = self.column
+
+        # previously, _RawColumnEntity didn't look for from_obj_alias;
+        # however, there's no apparent case where we would be here for the
+        # from_statement use case and want to ignore it.  raw columns
+        # combined with from_statement is not really a use case anyway
+ if compile_state._from_obj_alias:
+ column = compile_state._from_obj_alias.columns[column]
+
+ if column._annotations:
+ # annotated columns perform more slowly in compiler and
+ # result due to the __eq__() method, so use deannotated
+ column = column._deannotate()
+
+ if compile_state.compound_eager_adapter:
+ column = compile_state.compound_eager_adapter.columns[column]
+
+ getter = result._getter(column)
+
+ ret = getter, self._label_name, self._extra_entities
+ self._row_processor = ret
+
+ if self.translate_raw_column:
+ extra_entities = self._extra_entities + (
+ result.context.invoked_statement._raw_columns[
+ self.raw_column_index
+ ],
+ )
+ return getter, self._label_name, extra_entities
+ else:
+ return ret
+
+
+class _RawColumnEntity(_ColumnEntity):
+ entity_zero = None
+ mapper = None
+ supports_single_entity = False
+
+ __slots__ = (
+ "expr",
+ "column",
+ "_label_name",
+ "entity_zero_or_selectable",
+ "_extra_entities",
+ )
+
+ def __init__(
+ self,
+ compile_state,
+ column,
+ entities_collection,
+ raw_column_index,
+ is_current_entities,
+ parent_bundle=None,
+ ):
+ self.expr = column
+ self.raw_column_index = raw_column_index
+ self.translate_raw_column = raw_column_index is not None
+
+ if column._is_star:
+ compile_state.compile_options += {"_is_star": True}
+
+ if not is_current_entities or column._is_text_clause:
+ self._label_name = None
+ else:
+ self._label_name = compile_state._label_convention(column)
+
+ if parent_bundle:
+ parent_bundle._entities.append(self)
+ else:
+ entities_collection.append(self)
+
+ self.column = column
+ self.entity_zero_or_selectable = (
+ self.column._from_objects[0] if self.column._from_objects else None
+ )
+ self._extra_entities = (self.expr, self.column)
+ self._fetch_column = self._row_processor = None
+
+ def corresponds_to(self, entity):
+ return False
+
+ def setup_compile_state(self, compile_state):
+ current_adapter = compile_state._get_current_adapter()
+ if current_adapter:
+ column = current_adapter(self.column, False)
+ else:
+ column = self.column
+
+ if column._annotations:
+ # annotated columns perform more slowly in compiler and
+ # result due to the __eq__() method, so use deannotated
+ column = column._deannotate()
+
+ compile_state.dedupe_columns.add(column)
+ compile_state.primary_columns.append(column)
+ self._fetch_column = column
+
+
+class _ORMColumnEntity(_ColumnEntity):
+ """Column/expression based entity."""
+
+ supports_single_entity = False
+
+ __slots__ = (
+ "expr",
+ "mapper",
+ "column",
+ "_label_name",
+ "entity_zero_or_selectable",
+ "entity_zero",
+ "_extra_entities",
+ )
+
+ def __init__(
+ self,
+ compile_state,
+ column,
+ entities_collection,
+ parententity,
+ raw_column_index,
+ is_current_entities,
+ parent_bundle=None,
+ ):
+ annotations = column._annotations
+
+ _entity = parententity
+
+ # an AliasedClass won't have proxy_key in the annotations for
+ # a column if it was acquired using the class' adapter directly,
+ # such as using AliasedInsp._adapt_element(). this occurs
+ # within internal loaders.
+
+ orm_key = annotations.get("proxy_key", None)
+ proxy_owner = annotations.get("proxy_owner", _entity)
+ if orm_key:
+ self.expr = getattr(proxy_owner.entity, orm_key)
+ self.translate_raw_column = False
+ else:
+ # if orm_key is not present, that means this is an ad-hoc
+ # SQL ColumnElement, like a CASE() or other expression.
+ # include this column position from the invoked statement
+ # in the ORM-level ResultSetMetaData on each execute, so that
+ # it can be targeted by identity after caching
+ self.expr = column
+ self.translate_raw_column = raw_column_index is not None
+
+ self.raw_column_index = raw_column_index
+
+ if is_current_entities:
+ self._label_name = compile_state._label_convention(
+ column, col_name=orm_key
+ )
+ else:
+ self._label_name = None
+
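+        # accessed for the side effect only; _post_inspect ensures the
+        # entity's mapper configuration has completed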
+ _entity._post_inspect
+ self.entity_zero = self.entity_zero_or_selectable = ezero = _entity
+ self.mapper = mapper = _entity.mapper
+
+ if parent_bundle:
+ parent_bundle._entities.append(self)
+ else:
+ entities_collection.append(self)
+
+ compile_state._has_orm_entities = True
+
+ self.column = column
+
+ self._fetch_column = self._row_processor = None
+
+ self._extra_entities = (self.expr, self.column)
+
+ if (
+ mapper.with_polymorphic
+ or mapper.inherits
+ or mapper._requires_row_aliasing
+ ):
+ compile_state._create_with_polymorphic_adapter(
+ ezero, ezero.selectable
+ )
+
+ def corresponds_to(self, entity):
+ if _is_aliased_class(entity):
+ # TODO: polymorphic subclasses ?
+ return entity is self.entity_zero
+ else:
+ return not _is_aliased_class(
+ self.entity_zero
+ ) and entity.common_parent(self.entity_zero)
+
+ def setup_compile_state(self, compile_state):
+ current_adapter = compile_state._get_current_adapter()
+ if current_adapter:
+ column = current_adapter(self.column, False)
+ else:
+ column = self.column
+
+ ezero = self.entity_zero
+
+ single_table_crit = self.mapper._single_table_criterion
+ if (
+ single_table_crit is not None
+ or ("additional_entity_criteria", self.mapper)
+ in compile_state.global_attributes
+ ):
+
+ compile_state.extra_criteria_entities[ezero] = (
+ ezero,
+ ezero._adapter if ezero.is_aliased_class else None,
+ )
+
+ if column._annotations and not column._expression_label:
+ # annotated columns perform more slowly in compiler and
+ # result due to the __eq__() method, so use deannotated
+ column = column._deannotate()
+
+ # use entity_zero as the from if we have it. this is necessary
+ # for polymorphic scenarios where our FROM is based on ORM entity,
+ # not the FROM of the column. but also, don't use it if our column
+ # doesn't actually have any FROMs that line up, such as when its
+ # a scalar subquery.
+ if set(self.column._from_objects).intersection(
+ ezero.selectable._from_objects
+ ):
+ compile_state._fallback_from_clauses.append(ezero.selectable)
+
+ compile_state.dedupe_columns.add(column)
+ compile_state.primary_columns.append(column)
+ self._fetch_column = column
+
+
+class _IdentityTokenEntity(_ORMColumnEntity):
+ translate_raw_column = False
+
+ def setup_compile_state(self, compile_state):
+ pass
+
+ def row_processor(self, context, result):
+ def getter(row):
+ return context.load_options._refresh_identity_token
+
+ return getter, self._label_name, self._extra_entities
diff --git a/lib/sqlalchemy/orm/decl_api.py b/lib/sqlalchemy/orm/decl_api.py
new file mode 100644
index 0000000..16f91c6
--- /dev/null
+++ b/lib/sqlalchemy/orm/decl_api.py
@@ -0,0 +1,1062 @@
+# orm/decl_api.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+"""Public API functions and helpers for declarative."""
+from __future__ import absolute_import
+
+import itertools
+import re
+import weakref
+
+from . import attributes
+from . import clsregistry
+from . import exc as orm_exc
+from . import instrumentation
+from . import interfaces
+from . import mapper as mapperlib
+from .base import _inspect_mapped_class
+from .decl_base import _add_attribute
+from .decl_base import _as_declarative
+from .decl_base import _declarative_constructor
+from .decl_base import _DeferredMapperConfig
+from .decl_base import _del_attribute
+from .decl_base import _mapper
+from .descriptor_props import SynonymProperty as _orm_synonym
+from .. import exc
+from .. import inspection
+from .. import util
+from ..sql.schema import MetaData
+from ..util import hybridmethod
+from ..util import hybridproperty
+
+
+def has_inherited_table(cls):
+ """Given a class, return True if any of the classes it inherits from has a
+ mapped table, otherwise return False.
+
+ This is used in declarative mixins to build attributes that behave
+ differently for the base class vs. a subclass in an inheritance
+ hierarchy.
+
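+    For example, a ``__tablename__`` mixin can use it to return ``None``
+    for single-table-inheritance subclasses, so that only the base of the
+    hierarchy generates a table (an illustrative sketch; names are
+    arbitrary)::
+
+        class Tablename:
+            @declared_attr
+            def __tablename__(cls):
+                if has_inherited_table(cls):
+                    return None
+                return cls.__name__.lower()
+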
+ .. seealso::
+
+ :ref:`decl_mixin_inheritance`
+
+ """
+ for class_ in cls.__mro__[1:]:
+ if getattr(class_, "__table__", None) is not None:
+ return True
+ return False
+
+
+class DeclarativeMeta(type):
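+    """Metaclass for declarative base classes.
+
+    Applies the declarative mapping process to newly created subclasses
+    and routes class-level attribute assignment and deletion through the
+    declarative machinery.
+    """
+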
+ def __init__(cls, classname, bases, dict_, **kw):
+ # use cls.__dict__, which can be modified by an
+ # __init_subclass__() method (#7900)
+ dict_ = cls.__dict__
+
+ # early-consume registry from the initial declarative base,
+ # assign privately to not conflict with subclass attributes named
+ # "registry"
+ reg = getattr(cls, "_sa_registry", None)
+ if reg is None:
+ reg = dict_.get("registry", None)
+ if not isinstance(reg, registry):
+ raise exc.InvalidRequestError(
+ "Declarative base class has no 'registry' attribute, "
+ "or registry is not a sqlalchemy.orm.registry() object"
+ )
+ else:
+ cls._sa_registry = reg
+
+ if not cls.__dict__.get("__abstract__", False):
+ _as_declarative(reg, cls, dict_)
+ type.__init__(cls, classname, bases, dict_)
+
+ def __setattr__(cls, key, value):
+ _add_attribute(cls, key, value)
+
+ def __delattr__(cls, key):
+ _del_attribute(cls, key)
+
+
+def synonym_for(name, map_column=False):
+ """Decorator that produces an :func:`_orm.synonym`
+ attribute in conjunction with a Python descriptor.
+
+ The function being decorated is passed to :func:`_orm.synonym` as the
+ :paramref:`.orm.synonym.descriptor` parameter::
+
+ class MyClass(Base):
+ __tablename__ = 'my_table'
+
+ id = Column(Integer, primary_key=True)
+ _job_status = Column("job_status", String(50))
+
+ @synonym_for("job_status")
+ @property
+ def job_status(self):
+ return "Status: %s" % self._job_status
+
+ The :ref:`hybrid properties <mapper_hybrids>` feature of SQLAlchemy
+ is typically preferred instead of synonyms, which is a more legacy
+ feature.
+
+ .. seealso::
+
+ :ref:`synonyms` - Overview of synonyms
+
+ :func:`_orm.synonym` - the mapper-level function
+
+ :ref:`mapper_hybrids` - The Hybrid Attribute extension provides an
+ updated approach to augmenting attribute behavior more flexibly than
+ can be achieved with synonyms.
+
+ """
+
+ def decorate(fn):
+ return _orm_synonym(name, map_column=map_column, descriptor=fn)
+
+ return decorate
+
+
+class declared_attr(interfaces._MappedAttribute, property):
+ """Mark a class-level method as representing the definition of
+ a mapped property or special declarative member name.
+
+ :class:`_orm.declared_attr` is typically applied as a decorator to a class
+ level method, turning the attribute into a scalar-like property that can be
+ invoked from the uninstantiated class. The Declarative mapping process
+ looks for these :class:`_orm.declared_attr` callables as it scans classes,
+ and assumes any attribute marked with :class:`_orm.declared_attr` will be a
+ callable that will produce an object specific to the Declarative mapping or
+ table configuration.
+
+ :class:`_orm.declared_attr` is usually applicable to mixins, to define
+ relationships that are to be applied to different implementors of the
+ class. It is also used to define :class:`_schema.Column` objects that
+ include the :class:`_schema.ForeignKey` construct, as these cannot be
+ easily reused across different mappings. The example below illustrates
+ both::
+
+ class ProvidesUser(object):
+ "A mixin that adds a 'user' relationship to classes."
+
+ @declared_attr
+ def user_id(self):
+ return Column(ForeignKey("user_account.id"))
+
+ @declared_attr
+ def user(self):
+ return relationship("User")
+
+ :class:`_orm.declared_attr` can also be applied to mapped classes, such as
+ to provide a "polymorphic" scheme for inheritance::
+
+ class Employee(Base):
+ id = Column(Integer, primary_key=True)
+ type = Column(String(50), nullable=False)
+
+ @declared_attr
+ def __tablename__(cls):
+ return cls.__name__.lower()
+
+ @declared_attr
+ def __mapper_args__(cls):
+ if cls.__name__ == 'Employee':
+ return {
+ "polymorphic_on":cls.type,
+ "polymorphic_identity":"Employee"
+ }
+ else:
+ return {"polymorphic_identity":cls.__name__}
+
+ To use :class:`_orm.declared_attr` inside of a Python dataclass
+ as discussed at :ref:`orm_declarative_dataclasses_declarative_table`,
+ it may be placed directly inside the field metadata using a lambda::
+
+ @dataclass
+ class AddressMixin:
+ __sa_dataclass_metadata_key__ = "sa"
+
+ user_id: int = field(
+ init=False, metadata={"sa": declared_attr(lambda: Column(ForeignKey("user.id")))}
+ )
+ user: User = field(
+ init=False, metadata={"sa": declared_attr(lambda: relationship(User))}
+ )
+
+ :class:`_orm.declared_attr` also may be omitted from this form using a
+ lambda directly, as in::
+
+ user: User = field(
+ init=False, metadata={"sa": lambda: relationship(User)}
+ )
+
+ .. seealso::
+
+ :ref:`orm_mixins_toplevel` - illustrates how to use Declarative Mixins
+ which is the primary use case for :class:`_orm.declared_attr`
+
+ :ref:`orm_declarative_dataclasses_mixin` - illustrates special forms
+ for use with Python dataclasses
+
+ """ # noqa: E501
+
+ def __init__(self, fget, cascading=False):
+ super(declared_attr, self).__init__(fget)
+ self.__doc__ = fget.__doc__
+ self._cascading = cascading
+
+ def __get__(desc, self, cls):
+ # the declared_attr needs to make use of a cache that exists
+ # for the span of the declarative scan_attributes() phase.
+ # to achieve this we look at the class manager that's configured.
+ manager = attributes.manager_of_class(cls)
+ if manager is None:
+ if not re.match(r"^__.+__$", desc.fget.__name__):
+ # if there is no manager at all, then this class hasn't been
+ # run through declarative or mapper() at all, emit a warning.
+ util.warn(
+ "Unmanaged access of declarative attribute %s from "
+ "non-mapped class %s" % (desc.fget.__name__, cls.__name__)
+ )
+ return desc.fget(cls)
+ elif manager.is_mapped:
+ # the class is mapped, which means we're outside of the declarative
+ # scan setup, just run the function.
+ return desc.fget(cls)
+
+ # here, we are inside of the declarative scan. use the registry
+ # that is tracking the values of these attributes.
+ declarative_scan = manager.declarative_scan()
+ assert declarative_scan is not None
+ reg = declarative_scan.declared_attr_reg
+
+ if desc in reg:
+ return reg[desc]
+ else:
+ reg[desc] = obj = desc.fget(cls)
+ return obj
+
+ @hybridmethod
+ def _stateful(cls, **kw):
+ return _stateful_declared_attr(**kw)
+
+ @hybridproperty
+ def cascading(cls):
+ """Mark a :class:`.declared_attr` as cascading.
+
+ This is a special-use modifier which indicates that a column
+ or MapperProperty-based declared attribute should be configured
+ distinctly per mapped subclass, within a mapped-inheritance scenario.
+
+ .. warning::
+
+ The :attr:`.declared_attr.cascading` modifier has several
+ limitations:
+
+ * The flag **only** applies to the use of :class:`.declared_attr`
+ on declarative mixin classes and ``__abstract__`` classes; it
+ currently has no effect when used on a mapped class directly.
+
+ * The flag **only** applies to normally-named attributes, e.g.
+ not any special underscore attributes such as ``__tablename__``.
+ On these attributes it has **no** effect.
+
+ * The flag currently **does not allow further overrides** down
+ the class hierarchy; if a subclass tries to override the
+ attribute, a warning is emitted and the overridden attribute
+ is skipped. This is a limitation that it is hoped will be
+ resolved at some point.
+
+ Below, both MyClass as well as MySubClass will have a distinct
+ ``id`` Column object established::
+
+ class HasIdMixin(object):
+ @declared_attr.cascading
+ def id(cls):
+ if has_inherited_table(cls):
+ return Column(
+ ForeignKey('myclass.id'), primary_key=True
+ )
+ else:
+ return Column(Integer, primary_key=True)
+
+ class MyClass(HasIdMixin, Base):
+ __tablename__ = 'myclass'
+ # ...
+
+ class MySubClass(MyClass):
+ ""
+ # ...
+
+ The behavior of the above configuration is that ``MySubClass``
+ will refer to both its own ``id`` column as well as that of
+        ``MyClass`` underneath the attribute named ``id``.
+
+ .. seealso::
+
+ :ref:`declarative_inheritance`
+
+ :ref:`mixin_inheritance_columns`
+
+
+ """
+ return cls._stateful(cascading=True)
+
+
+class _stateful_declared_attr(declared_attr):
+ def __init__(self, **kw):
+ self.kw = kw
+
+ def _stateful(self, **kw):
+ new_kw = self.kw.copy()
+ new_kw.update(kw)
+ return _stateful_declared_attr(**new_kw)
+
+ def __call__(self, fn):
+ return declared_attr(fn, **self.kw)
+
+
+def declarative_mixin(cls):
+ """Mark a class as providing the feature of "declarative mixin".
+
+ E.g.::
+
+ from sqlalchemy.orm import declared_attr
+ from sqlalchemy.orm import declarative_mixin
+
+ @declarative_mixin
+ class MyMixin:
+
+ @declared_attr
+ def __tablename__(cls):
+ return cls.__name__.lower()
+
+ __table_args__ = {'mysql_engine': 'InnoDB'}
+ __mapper_args__= {'always_refresh': True}
+
+ id = Column(Integer, primary_key=True)
+
+ class MyModel(MyMixin, Base):
+ name = Column(String(1000))
+
+ The :func:`_orm.declarative_mixin` decorator currently does not modify
+    the given class in any way; its current purpose is strictly to assist
+ the :ref:`Mypy plugin <mypy_toplevel>` in being able to identify
+ SQLAlchemy declarative mixin classes when no other context is present.
+
+ .. versionadded:: 1.4.6
+
+ .. seealso::
+
+ :ref:`orm_mixins_toplevel`
+
+ :ref:`mypy_declarative_mixins` - in the
+ :ref:`Mypy plugin documentation <mypy_toplevel>`
+
+ """ # noqa: E501
+
+ return cls
+
+
+def declarative_base(
+ bind=None,
+ metadata=None,
+ mapper=None,
+ cls=object,
+ name="Base",
+ constructor=_declarative_constructor,
+ class_registry=None,
+ metaclass=DeclarativeMeta,
+):
+ r"""Construct a base class for declarative class definitions.
+
+ The new base class will be given a metaclass that produces
+ appropriate :class:`~sqlalchemy.schema.Table` objects and makes
+ the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the
+ information provided declaratively in the class and any subclasses
+ of the class.
+
+ The :func:`_orm.declarative_base` function is a shorthand version
+ of using the :meth:`_orm.registry.generate_base`
+ method. That is, the following::
+
+ from sqlalchemy.orm import declarative_base
+
+ Base = declarative_base()
+
+ Is equivalent to::
+
+ from sqlalchemy.orm import registry
+
+ mapper_registry = registry()
+ Base = mapper_registry.generate_base()
+
+ See the docstring for :class:`_orm.registry`
+ and :meth:`_orm.registry.generate_base`
+ for more details.
+
+ .. versionchanged:: 1.4 The :func:`_orm.declarative_base`
+ function is now a specialization of the more generic
+ :class:`_orm.registry` class. The function also moves to the
+       ``sqlalchemy.orm`` package from the ``sqlalchemy.ext.declarative``
+       package.
+
+
+ :param bind: An optional
+ :class:`~sqlalchemy.engine.Connectable`, will be assigned
+ the ``bind`` attribute on the :class:`~sqlalchemy.schema.MetaData`
+ instance.
+
+ .. deprecated:: 1.4 The "bind" argument to declarative_base is
+ deprecated and will be removed in SQLAlchemy 2.0.
+
+ :param metadata:
+ An optional :class:`~sqlalchemy.schema.MetaData` instance. All
+ :class:`~sqlalchemy.schema.Table` objects implicitly declared by
+ subclasses of the base will share this MetaData. A MetaData instance
+ will be created if none is provided. The
+ :class:`~sqlalchemy.schema.MetaData` instance will be available via the
+ ``metadata`` attribute of the generated declarative base class.
+
+ :param mapper:
+ An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will
+ be used to map subclasses to their Tables.
+
+ :param cls:
+ Defaults to :class:`object`. A type to use as the base for the generated
+ declarative base class. May be a class or tuple of classes.
+
+ :param name:
+ Defaults to ``Base``. The display name for the generated
+ class. Customizing this is not required, but can improve clarity in
+ tracebacks and debugging.
+
+ :param constructor:
+ Specify the implementation for the ``__init__`` function on a mapped
+ class that has no ``__init__`` of its own. Defaults to an
+ implementation that assigns \**kwargs for declared
+ fields and relationships to an instance. If ``None`` is supplied,
+ no __init__ will be provided and construction will fall back to
+ cls.__init__ by way of the normal Python semantics.
+
+ :param class_registry: optional dictionary that will serve as the
+      registry of class names -> mapped classes when string names
+ are used to identify classes inside of :func:`_orm.relationship`
+ and others. Allows two or more declarative base classes
+ to share the same registry of class names for simplified
+ inter-base relationships.
+
+ :param metaclass:
+ Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__
+ compatible callable to use as the meta type of the generated
+ declarative base class.
+
+ .. seealso::
+
+ :class:`_orm.registry`
+
+ """
+
+ if bind is not None:
+        # util.deprecated_params does not work here, so warn manually
+ util.warn_deprecated_20(
+ "The ``bind`` argument to declarative_base is "
+ "deprecated and will be removed in SQLAlchemy 2.0.",
+ )
+
+ return registry(
+ _bind=bind,
+ metadata=metadata,
+ class_registry=class_registry,
+ constructor=constructor,
+ ).generate_base(
+ mapper=mapper,
+ cls=cls,
+ name=name,
+ metaclass=metaclass,
+ )
+
+
+class registry(object):
+ """Generalized registry for mapping classes.
+
+ The :class:`_orm.registry` serves as the basis for maintaining a collection
+ of mappings, and provides configurational hooks used to map classes.
+
+ The three general kinds of mappings supported are Declarative Base,
+ Declarative Decorator, and Imperative Mapping. All of these mapping
+ styles may be used interchangeably:
+
+ * :meth:`_orm.registry.generate_base` returns a new declarative base
+ class, and is the underlying implementation of the
+ :func:`_orm.declarative_base` function.
+
+ * :meth:`_orm.registry.mapped` provides a class decorator that will
+ apply declarative mapping to a class without the use of a declarative
+ base class.
+
+ * :meth:`_orm.registry.map_imperatively` will produce a
+ :class:`_orm.Mapper` for a class without scanning the class for
+ declarative class attributes. This method suits the use case historically
+ provided by the
+ :func:`_orm.mapper` classical mapping function.
+
+ .. versionadded:: 1.4
+
+ .. seealso::
+
+ :ref:`orm_mapping_classes_toplevel` - overview of class mapping
+ styles.
+
+ """
+
+ def __init__(
+ self,
+ metadata=None,
+ class_registry=None,
+ constructor=_declarative_constructor,
+ _bind=None,
+ ):
+ r"""Construct a new :class:`_orm.registry`
+
+ :param metadata:
+ An optional :class:`_schema.MetaData` instance. All
+ :class:`_schema.Table` objects generated using declarative
+ table mapping will make use of this :class:`_schema.MetaData`
+ collection. If this argument is left at its default of ``None``,
+ a blank :class:`_schema.MetaData` collection is created.
+
+ :param constructor:
+ Specify the implementation for the ``__init__`` function on a mapped
+ class that has no ``__init__`` of its own. Defaults to an
+ implementation that assigns \**kwargs for declared
+ fields and relationships to an instance. If ``None`` is supplied,
+ no __init__ will be provided and construction will fall back to
+ cls.__init__ by way of the normal Python semantics.
+
+ :param class_registry: optional dictionary that will serve as the
+          registry of class names -> mapped classes when string names
+ are used to identify classes inside of :func:`_orm.relationship`
+ and others. Allows two or more declarative base classes
+ to share the same registry of class names for simplified
+ inter-base relationships.
+
+ """
+ lcl_metadata = metadata or MetaData()
+ if _bind:
+ lcl_metadata.bind = _bind
+
+ if class_registry is None:
+ class_registry = weakref.WeakValueDictionary()
+
+ self._class_registry = class_registry
+ self._managers = weakref.WeakKeyDictionary()
+ self._non_primary_mappers = weakref.WeakKeyDictionary()
+ self.metadata = lcl_metadata
+ self.constructor = constructor
+
+ self._dependents = set()
+ self._dependencies = set()
+
+ self._new_mappers = False
+
+ with mapperlib._CONFIGURE_MUTEX:
+ mapperlib._mapper_registries[self] = True
+
+ @property
+ def mappers(self):
+ """read only collection of all :class:`_orm.Mapper` objects."""
+
+ return frozenset(manager.mapper for manager in self._managers).union(
+ self._non_primary_mappers
+ )
+
+ def _set_depends_on(self, registry):
+ if registry is self:
+ return
+ registry._dependents.add(self)
+ self._dependencies.add(registry)
+
+ def _flag_new_mapper(self, mapper):
+ mapper._ready_for_configure = True
+ if self._new_mappers:
+ return
+
+ for reg in self._recurse_with_dependents({self}):
+ reg._new_mappers = True
+
+ @classmethod
+ def _recurse_with_dependents(cls, registries):
+ todo = registries
+ done = set()
+ while todo:
+ reg = todo.pop()
+ done.add(reg)
+
+ # if yielding would remove dependents, make sure we have
+ # them before
+ todo.update(reg._dependents.difference(done))
+ yield reg
+
+ # if yielding would add dependents, make sure we have them
+ # after
+ todo.update(reg._dependents.difference(done))
+
+ @classmethod
+ def _recurse_with_dependencies(cls, registries):
+ todo = registries
+ done = set()
+ while todo:
+ reg = todo.pop()
+ done.add(reg)
+
+ # if yielding would remove dependencies, make sure we have
+ # them before
+ todo.update(reg._dependencies.difference(done))
+
+ yield reg
+
+            # if yielding would add dependencies, make sure we have
+            # them after
+ todo.update(reg._dependencies.difference(done))
+
+ def _mappers_to_configure(self):
+ return itertools.chain(
+ (
+ manager.mapper
+ for manager in list(self._managers)
+ if manager.is_mapped
+ and not manager.mapper.configured
+ and manager.mapper._ready_for_configure
+ ),
+ (
+ npm
+ for npm in list(self._non_primary_mappers)
+ if not npm.configured and npm._ready_for_configure
+ ),
+ )
+
+ def _add_non_primary_mapper(self, np_mapper):
+ self._non_primary_mappers[np_mapper] = True
+
+ def _dispose_cls(self, cls):
+ clsregistry.remove_class(cls.__name__, cls, self._class_registry)
+
+ def _add_manager(self, manager):
+ self._managers[manager] = True
+ if manager.registry is not None and manager.is_mapped:
+ raise exc.ArgumentError(
+ "Class '%s' already has a primary mapper defined. "
+ % manager.class_
+ )
+ manager.registry = self
+
+ def configure(self, cascade=False):
+ """Configure all as-yet unconfigured mappers in this
+ :class:`_orm.registry`.
+
+ The configure step is used to reconcile and initialize the
+ :func:`_orm.relationship` linkages between mapped classes, as well as
+ to invoke configuration events such as the
+ :meth:`_orm.MapperEvents.before_configured` and
+ :meth:`_orm.MapperEvents.after_configured`, which may be used by ORM
+ extensions or user-defined extension hooks.
+
+ If one or more mappers in this registry contain
+ :func:`_orm.relationship` constructs that refer to mapped classes in
+ other registries, this registry is said to be *dependent* on those
+ registries. In order to configure those dependent registries
+ automatically, the :paramref:`_orm.registry.configure.cascade` flag
+ should be set to ``True``. Otherwise, if they are not configured, an
+ exception will be raised. The rationale behind this behavior is to
+ allow an application to programmatically invoke configuration of
+ registries while controlling whether or not the process implicitly
+ reaches other registries.
+
+ As an alternative to invoking :meth:`_orm.registry.configure`, the ORM
+        function :func:`_orm.configure_mappers` may be used to ensure
+ configuration is complete for all :class:`_orm.registry` objects in
+ memory. This is generally simpler to use and also predates the usage of
+ :class:`_orm.registry` objects overall. However, this function will
+        impact all mappings throughout the running Python process and may
+        consume more memory and time in an application that has many
+        registries serving different purposes, not all of which are needed
+        immediately.
+
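+        A minimal sketch, assuming classes mapped in ``reg_b`` have
+        :func:`_orm.relationship` links to classes mapped in ``reg_a``
+        (registry names are arbitrary)::
+
+            reg_a = registry()
+            reg_b = registry()
+
+            # ... map classes in each registry ...
+
+            # cascade=True also configures the reg_a dependency; without
+            # it, the unconfigured dependency raises an error
+            reg_b.configure(cascade=True)
+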
+ .. seealso::
+
+ :func:`_orm.configure_mappers`
+
+
+ .. versionadded:: 1.4.0b2
+
+ """
+ mapperlib._configure_registries({self}, cascade=cascade)
+
+ def dispose(self, cascade=False):
+ """Dispose of all mappers in this :class:`_orm.registry`.
+
+ After invocation, all the classes that were mapped within this registry
+ will no longer have class instrumentation associated with them. This
+ method is the per-:class:`_orm.registry` analogue to the
+ application-wide :func:`_orm.clear_mappers` function.
+
+ If this registry contains mappers that are dependencies of other
+ registries, typically via :func:`_orm.relationship` links, then those
+ registries must be disposed as well. When such registries exist in
+ relation to this one, their :meth:`_orm.registry.dispose` method will
+ also be called, if the :paramref:`_orm.registry.dispose.cascade` flag
+ is set to ``True``; otherwise, an error is raised if those registries
+ were not already disposed.
+
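+        A minimal sketch (names are arbitrary)::
+
+            mapper_registry = registry()
+
+            # ... map classes ...
+
+            # cascade=True also disposes registries that depend on this one
+            mapper_registry.dispose(cascade=True)
+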
+ .. versionadded:: 1.4.0b2
+
+ .. seealso::
+
+ :func:`_orm.clear_mappers`
+
+ """
+
+ mapperlib._dispose_registries({self}, cascade=cascade)
+
+ def _dispose_manager_and_mapper(self, manager):
+ if "mapper" in manager.__dict__:
+ mapper = manager.mapper
+
+ mapper._set_dispose_flags()
+
+ class_ = manager.class_
+ self._dispose_cls(class_)
+ instrumentation._instrumentation_factory.unregister(class_)
+
+ def generate_base(
+ self,
+ mapper=None,
+ cls=object,
+ name="Base",
+ metaclass=DeclarativeMeta,
+ ):
+ """Generate a declarative base class.
+
+ Classes that inherit from the returned class object will be
+ automatically mapped using declarative mapping.
+
+ E.g.::
+
+ from sqlalchemy.orm import registry
+
+ mapper_registry = registry()
+
+ Base = mapper_registry.generate_base()
+
+ class MyClass(Base):
+ __tablename__ = "my_table"
+ id = Column(Integer, primary_key=True)
+
+ The above dynamically generated class is equivalent to the
+ non-dynamic example below::
+
+ from sqlalchemy.orm import registry
+ from sqlalchemy.orm.decl_api import DeclarativeMeta
+
+ mapper_registry = registry()
+
+ class Base(metaclass=DeclarativeMeta):
+ __abstract__ = True
+ registry = mapper_registry
+ metadata = mapper_registry.metadata
+
+ __init__ = mapper_registry.constructor
+
+ The :meth:`_orm.registry.generate_base` method provides the
+ implementation for the :func:`_orm.declarative_base` function, which
+ creates the :class:`_orm.registry` and base class all at once.
+
+ See the section :ref:`orm_declarative_mapping` for background and
+ examples.
+
+ :param mapper:
+ An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`.
+ This function is used to generate new :class:`_orm.Mapper` objects.
+
+ :param cls:
+ Defaults to :class:`object`. A type to use as the base for the
+ generated declarative base class. May be a class or tuple of classes.
+
+ :param name:
+ Defaults to ``Base``. The display name for the generated
+ class. Customizing this is not required, but can improve clarity in
+ tracebacks and debugging.
+
+ :param metaclass:
+ Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__
+ compatible callable to use as the meta type of the generated
+ declarative base class.
+
+ .. seealso::
+
+ :ref:`orm_declarative_mapping`
+
+ :func:`_orm.declarative_base`
+
+ """
+ metadata = self.metadata
+
+        bases = (cls,) if not isinstance(cls, tuple) else cls
+
+ class_dict = dict(registry=self, metadata=metadata)
+ if isinstance(cls, type):
+ class_dict["__doc__"] = cls.__doc__
+
+ if self.constructor:
+ class_dict["__init__"] = self.constructor
+
+ class_dict["__abstract__"] = True
+ if mapper:
+ class_dict["__mapper_cls__"] = mapper
+
+ if hasattr(cls, "__class_getitem__"):
+
+ def __class_getitem__(cls, key):
+ # allow generic classes in py3.9+
+ return cls
+
+ class_dict["__class_getitem__"] = __class_getitem__
+
+ return metaclass(name, bases, class_dict)
+
+ def mapped(self, cls):
+ """Class decorator that will apply the Declarative mapping process
+ to a given class.
+
+ E.g.::
+
+ from sqlalchemy.orm import registry
+
+ mapper_registry = registry()
+
+ @mapper_registry.mapped
+ class Foo:
+ __tablename__ = 'some_table'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String)
+
+ See the section :ref:`orm_declarative_mapping` for complete
+ details and examples.
+
+ :param cls: class to be mapped.
+
+ :return: the class that was passed.
+
+ .. seealso::
+
+ :ref:`orm_declarative_mapping`
+
+ :meth:`_orm.registry.generate_base` - generates a base class
+ that will apply Declarative mapping to subclasses automatically
+ using a Python metaclass.
+
+ """
+ _as_declarative(self, cls, cls.__dict__)
+ return cls
+
+ def as_declarative_base(self, **kw):
+ """
+ Class decorator which will invoke
+ :meth:`_orm.registry.generate_base`
+ for a given base class.
+
+ E.g.::
+
+ from sqlalchemy.orm import registry
+
+ mapper_registry = registry()
+
+ @mapper_registry.as_declarative_base()
+ class Base(object):
+ @declared_attr
+ def __tablename__(cls):
+ return cls.__name__.lower()
+ id = Column(Integer, primary_key=True)
+
+ class MyMappedClass(Base):
+            ...
+
+ All keyword arguments passed to
+ :meth:`_orm.registry.as_declarative_base` are passed
+ along to :meth:`_orm.registry.generate_base`.
+
+ """
+
+ def decorate(cls):
+ kw["cls"] = cls
+ kw["name"] = cls.__name__
+ return self.generate_base(**kw)
+
+ return decorate
+
+ def map_declaratively(self, cls):
+ """Map a class declaratively.
+
+ In this form of mapping, the class is scanned for mapping information,
+ including for columns to be associated with a table, and/or an
+ actual table object.
+
+ Returns the :class:`_orm.Mapper` object.
+
+ E.g.::
+
+ from sqlalchemy.orm import registry
+
+ mapper_registry = registry()
+
+ class Foo:
+ __tablename__ = 'some_table'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String)
+
+ mapper = mapper_registry.map_declaratively(Foo)
+
+ This function is more conveniently invoked indirectly via either the
+ :meth:`_orm.registry.mapped` class decorator or by subclassing a
+ declarative metaclass generated from
+ :meth:`_orm.registry.generate_base`.
+
+ See the section :ref:`orm_declarative_mapping` for complete
+ details and examples.
+
+ :param cls: class to be mapped.
+
+ :return: a :class:`_orm.Mapper` object.
+
+ .. seealso::
+
+ :ref:`orm_declarative_mapping`
+
+ :meth:`_orm.registry.mapped` - more common decorator interface
+ to this function.
+
+ :meth:`_orm.registry.map_imperatively`
+
+ """
+ return _as_declarative(self, cls, cls.__dict__)
+
+ def map_imperatively(self, class_, local_table=None, **kw):
+ r"""Map a class imperatively.
+
+ In this form of mapping, the class is not scanned for any mapping
+ information. Instead, all mapping constructs are passed as
+ arguments.
+
+ This method is intended to be fully equivalent to the classic
+ SQLAlchemy :func:`_orm.mapper` function, except that it's in terms of
+ a particular registry.
+
+ E.g.::
+
+ from sqlalchemy.orm import registry
+
+ mapper_registry = registry()
+
+ my_table = Table(
+ "my_table",
+ mapper_registry.metadata,
+ Column('id', Integer, primary_key=True)
+ )
+
+ class MyClass:
+ pass
+
+ mapper_registry.map_imperatively(MyClass, my_table)
+
+ See the section :ref:`orm_imperative_mapping` for complete background
+ and usage examples.
+
+ :param class\_: The class to be mapped. Corresponds to the
+ :paramref:`_orm.mapper.class_` parameter.
+
+ :param local_table: the :class:`_schema.Table` or other
+ :class:`_sql.FromClause` object that is the subject of the mapping.
+ Corresponds to the
+ :paramref:`_orm.mapper.local_table` parameter.
+
+ :param \**kw: all other keyword arguments are passed to the
+ :func:`_orm.mapper` function directly.
+
+ .. seealso::
+
+ :ref:`orm_imperative_mapping`
+
+ :ref:`orm_declarative_mapping`
+
+ """
+ return _mapper(self, class_, local_table, kw)
+
+
+mapperlib._legacy_registry = registry()
+
+
+@util.deprecated_params(
+ bind=(
+ "2.0",
+ "The ``bind`` argument to as_declarative is "
+ "deprecated and will be removed in SQLAlchemy 2.0.",
+ )
+)
+def as_declarative(**kw):
+ """
+ Class decorator which will adapt a given class into a
+ :func:`_orm.declarative_base`.
+
+ This function makes use of the :meth:`_orm.registry.as_declarative_base`
+ method, by first creating a :class:`_orm.registry` automatically
+ and then invoking the decorator.
+
+ E.g.::
+
+ from sqlalchemy.orm import as_declarative
+
+ @as_declarative()
+ class Base(object):
+ @declared_attr
+ def __tablename__(cls):
+ return cls.__name__.lower()
+ id = Column(Integer, primary_key=True)
+
+ class MyMappedClass(Base):
+            ...
+
+ .. seealso::
+
+ :meth:`_orm.registry.as_declarative_base`
+
+ """
+ bind, metadata, class_registry = (
+ kw.pop("bind", None),
+ kw.pop("metadata", None),
+ kw.pop("class_registry", None),
+ )
+
+ return registry(
+ _bind=bind, metadata=metadata, class_registry=class_registry
+ ).as_declarative_base(**kw)
+
+
+@inspection._inspects(DeclarativeMeta)
+def _inspect_decl_meta(cls):
+ mp = _inspect_mapped_class(cls)
+ if mp is None:
+ if _DeferredMapperConfig.has_cls(cls):
+ _DeferredMapperConfig.raise_unmapped_for_cls(cls)
+ raise orm_exc.UnmappedClassError(
+ cls,
+ msg="Class %s has a deferred mapping on it. It is not yet "
+ "usable as a mapped class." % orm_exc._safe_cls_name(cls),
+ )
+ return mp
diff --git a/lib/sqlalchemy/orm/decl_base.py b/lib/sqlalchemy/orm/decl_base.py
new file mode 100644
index 0000000..6e1c797
--- /dev/null
+++ b/lib/sqlalchemy/orm/decl_base.py
@@ -0,0 +1,1210 @@
+# orm/decl_base.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+"""Internal implementation for declarative."""
+from __future__ import absolute_import
+
+import collections
+import weakref
+
+from sqlalchemy.orm import attributes
+from sqlalchemy.orm import instrumentation
+from . import clsregistry
+from . import exc as orm_exc
+from . import mapper as mapperlib
+from .attributes import InstrumentedAttribute
+from .attributes import QueryableAttribute
+from .base import _is_mapped_class
+from .base import InspectionAttr
+from .descriptor_props import CompositeProperty
+from .descriptor_props import SynonymProperty
+from .interfaces import MapperProperty
+from .mapper import Mapper as mapper
+from .properties import ColumnProperty
+from .util import class_mapper
+from .. import event
+from .. import exc
+from .. import util
+from ..sql import expression
+from ..sql.schema import Column
+from ..sql.schema import Table
+from ..util import topological
+
+
+def _declared_mapping_info(cls):
+ # deferred mapping
+ if _DeferredMapperConfig.has_cls(cls):
+ return _DeferredMapperConfig.config_for_cls(cls)
+ # regular mapping
+ elif _is_mapped_class(cls):
+ return class_mapper(cls, configure=False)
+ else:
+ return None
+
+
+def _resolve_for_abstract_or_classical(cls):
+ if cls is object:
+ return None
+
+ if cls.__dict__.get("__abstract__", False):
+ for sup in cls.__bases__:
+ sup = _resolve_for_abstract_or_classical(sup)
+ if sup is not None:
+ return sup
+ else:
+ return None
+ else:
+ clsmanager = _dive_for_cls_manager(cls)
+
+ if clsmanager:
+ return clsmanager.class_
+ else:
+ return cls
+
+
+def _get_immediate_cls_attr(cls, attrname, strict=False):
+ """return an attribute of the class that is either present directly
+ on the class, e.g. not on a superclass, or is from a superclass but
+ this superclass is a non-mapped mixin, that is, not a descendant of
+ the declarative base and is also not classically mapped.
+
+ This is used to detect attributes that indicate something about
+ a mapped class independently from any mapped classes that it may
+ inherit from.
+
+ """
+
+ # the rules are different for this name than others,
+ # make sure we've moved it out. transitional
+ assert attrname != "__abstract__"
+
+ if not issubclass(cls, object):
+ return None
+
+ if attrname in cls.__dict__:
+ return getattr(cls, attrname)
+
+ for base in cls.__mro__[1:]:
+        _is_classical_inherits = _dive_for_cls_manager(base)
+
+ if attrname in base.__dict__ and (
+ base is cls
+ or (
+ (base in cls.__bases__ if strict else True)
+                and not _is_classical_inherits
+ )
+ ):
+ return getattr(base, attrname)
+ else:
+ return None
+
+
+def _dive_for_cls_manager(cls):
+ # because the class manager registration is pluggable,
+ # we need to do the search for every class in the hierarchy,
+ # rather than just a simple "cls._sa_class_manager"
+
+ # python 2 old style class
+ if not hasattr(cls, "__mro__"):
+ return None
+
+ for base in cls.__mro__:
+ manager = attributes.manager_of_class(base)
+ if manager:
+ return manager
+ return None
+
+
+def _as_declarative(registry, cls, dict_):
+
+ # declarative scans the class for attributes. no table or mapper
+ # args passed separately.
+
+ return _MapperConfig.setup_mapping(registry, cls, dict_, None, {})
+
+
+def _mapper(registry, cls, table, mapper_kw):
+ _ImperativeMapperConfig(registry, cls, table, mapper_kw)
+ return cls.__mapper__
+
+
+@util.preload_module("sqlalchemy.orm.decl_api")
+def _is_declarative_props(obj):
+ declared_attr = util.preloaded.orm_decl_api.declared_attr
+
+ return isinstance(obj, (declared_attr, util.classproperty))
+
+
+def _check_declared_props_nocascade(obj, name, cls):
+ if _is_declarative_props(obj):
+ if getattr(obj, "_cascading", False):
+ util.warn(
+ "@declared_attr.cascading is not supported on the %s "
+ "attribute on class %s. This attribute invokes for "
+ "subclasses in any case." % (name, cls)
+ )
+ return True
+ else:
+ return False
+
+
+class _MapperConfig(object):
+ __slots__ = (
+ "cls",
+ "classname",
+ "properties",
+ "declared_attr_reg",
+ "__weakref__",
+ )
+
+ @classmethod
+ def setup_mapping(cls, registry, cls_, dict_, table, mapper_kw):
+        manager = attributes.manager_of_class(cls_)
+        if manager and manager.class_ is cls_:
+            raise exc.InvalidRequestError(
+                "Class %r already has been instrumented declaratively" % cls_
+ )
+
+ if cls_.__dict__.get("__abstract__", False):
+ return
+
+ defer_map = _get_immediate_cls_attr(
+ cls_, "_sa_decl_prepare_nocascade", strict=True
+ ) or hasattr(cls_, "_sa_decl_prepare")
+
+ if defer_map:
+ cfg_cls = _DeferredMapperConfig
+ else:
+ cfg_cls = _ClassScanMapperConfig
+
+ return cfg_cls(registry, cls_, dict_, table, mapper_kw)
+
+ def __init__(self, registry, cls_, mapper_kw):
+ self.cls = util.assert_arg_type(cls_, type, "cls_")
+ self.classname = cls_.__name__
+ self.properties = util.OrderedDict()
+ self.declared_attr_reg = {}
+
+ if not mapper_kw.get("non_primary", False):
+ instrumentation.register_class(
+ self.cls,
+ finalize=False,
+ registry=registry,
+ declarative_scan=self,
+ init_method=registry.constructor,
+ )
+ else:
+ manager = attributes.manager_of_class(self.cls)
+ if not manager or not manager.is_mapped:
+ raise exc.InvalidRequestError(
+ "Class %s has no primary mapper configured. Configure "
+ "a primary mapper first before setting up a non primary "
+ "Mapper." % self.cls
+ )
+
+ def set_cls_attribute(self, attrname, value):
+
+ manager = instrumentation.manager_of_class(self.cls)
+ manager.install_member(attrname, value)
+ return value
+
+ def _early_mapping(self, mapper_kw):
+ self.map(mapper_kw)
+
+
+class _ImperativeMapperConfig(_MapperConfig):
+ __slots__ = ("dict_", "local_table", "inherits")
+
+ def __init__(
+ self,
+ registry,
+ cls_,
+ table,
+ mapper_kw,
+ ):
+ super(_ImperativeMapperConfig, self).__init__(
+ registry, cls_, mapper_kw
+ )
+
+ self.dict_ = {}
+ self.local_table = self.set_cls_attribute("__table__", table)
+
+ with mapperlib._CONFIGURE_MUTEX:
+ if not mapper_kw.get("non_primary", False):
+ clsregistry.add_class(
+ self.classname, self.cls, registry._class_registry
+ )
+
+ self._setup_inheritance(mapper_kw)
+
+ self._early_mapping(mapper_kw)
+
+ def map(self, mapper_kw=util.EMPTY_DICT):
+ mapper_cls = mapper
+
+ return self.set_cls_attribute(
+ "__mapper__",
+ mapper_cls(self.cls, self.local_table, **mapper_kw),
+ )
+
+ def _setup_inheritance(self, mapper_kw):
+ cls = self.cls
+
+ inherits = mapper_kw.get("inherits", None)
+
+ if inherits is None:
+ # since we search for classical mappings now, search for
+ # multiple mapped bases as well and raise an error.
+ inherits_search = []
+ for c in cls.__bases__:
+ c = _resolve_for_abstract_or_classical(c)
+ if c is None:
+ continue
+ if _declared_mapping_info(
+ c
+ ) is not None and not _get_immediate_cls_attr(
+ c, "_sa_decl_prepare_nocascade", strict=True
+ ):
+ inherits_search.append(c)
+
+ if inherits_search:
+ if len(inherits_search) > 1:
+ raise exc.InvalidRequestError(
+ "Class %s has multiple mapped bases: %r"
+ % (cls, inherits_search)
+ )
+ inherits = inherits_search[0]
+ elif isinstance(inherits, mapper):
+ inherits = inherits.class_
+
+ self.inherits = inherits
+
+
+class _ClassScanMapperConfig(_MapperConfig):
+ __slots__ = (
+ "dict_",
+ "local_table",
+ "persist_selectable",
+ "declared_columns",
+ "column_copies",
+ "table_args",
+ "tablename",
+ "mapper_args",
+ "mapper_args_fn",
+ "inherits",
+ )
+
+ def __init__(
+ self,
+ registry,
+ cls_,
+ dict_,
+ table,
+ mapper_kw,
+ ):
+
+ # grab class dict before the instrumentation manager has been added.
+ # reduces cycles
+ self.dict_ = dict(dict_) if dict_ else {}
+
+ super(_ClassScanMapperConfig, self).__init__(registry, cls_, mapper_kw)
+
+ self.persist_selectable = None
+ self.declared_columns = set()
+ self.column_copies = {}
+ self._setup_declared_events()
+
+ self._scan_attributes()
+
+ with mapperlib._CONFIGURE_MUTEX:
+ clsregistry.add_class(
+ self.classname, self.cls, registry._class_registry
+ )
+
+ self._extract_mappable_attributes()
+
+ self._extract_declared_columns()
+
+ self._setup_table(table)
+
+ self._setup_inheritance(mapper_kw)
+
+ self._early_mapping(mapper_kw)
+
+ def _setup_declared_events(self):
+ if _get_immediate_cls_attr(self.cls, "__declare_last__"):
+
+ @event.listens_for(mapper, "after_configured")
+ def after_configured():
+ self.cls.__declare_last__()
+
+ if _get_immediate_cls_attr(self.cls, "__declare_first__"):
+
+ @event.listens_for(mapper, "before_configured")
+ def before_configured():
+ self.cls.__declare_first__()
+
+ def _cls_attr_override_checker(self, cls):
+ """Produce a function that checks if a class has overridden an
+ attribute, taking SQLAlchemy-enabled dataclass fields into account.
+
+ """
+ sa_dataclass_metadata_key = _get_immediate_cls_attr(
+ cls, "__sa_dataclass_metadata_key__", None
+ )
+
+ if sa_dataclass_metadata_key is None:
+
+ def attribute_is_overridden(key, obj):
+ return getattr(cls, key) is not obj
+
+ else:
+
+ all_datacls_fields = {
+ f.name: f.metadata[sa_dataclass_metadata_key]
+ for f in util.dataclass_fields(cls)
+ if sa_dataclass_metadata_key in f.metadata
+ }
+ local_datacls_fields = {
+ f.name: f.metadata[sa_dataclass_metadata_key]
+ for f in util.local_dataclass_fields(cls)
+ if sa_dataclass_metadata_key in f.metadata
+ }
+
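+            # sentinel distinguishing "key not present" from a stored None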
+ absent = object()
+
+ def attribute_is_overridden(key, obj):
+ if _is_declarative_props(obj):
+ obj = obj.fget
+
+ # this function likely has some failure modes still if
+ # someone is doing a deep mixing of the same attribute
+ # name as plain Python attribute vs. dataclass field.
+
+ ret = local_datacls_fields.get(key, absent)
+ if _is_declarative_props(ret):
+ ret = ret.fget
+
+ if ret is obj:
+ return False
+ elif ret is not absent:
+ return True
+
+ all_field = all_datacls_fields.get(key, absent)
+
+ ret = getattr(cls, key, obj)
+
+ if ret is obj:
+ return False
+
+ # for dataclasses, this could be the
+ # 'default' of the field. so filter more specifically
+ # for an already-mapped InstrumentedAttribute
+ if ret is not absent and isinstance(
+ ret, InstrumentedAttribute
+ ):
+ return True
+
+ if all_field is obj:
+ return False
+ elif all_field is not absent:
+ return True
+
+ # can't find another attribute
+ return False
+
+ return attribute_is_overridden
+
+ def _cls_attr_resolver(self, cls):
+ """produce a function to iterate the "attributes" of a class,
+ adjusting for SQLAlchemy fields embedded in dataclass fields.
+
+ """
+ sa_dataclass_metadata_key = _get_immediate_cls_attr(
+ cls, "__sa_dataclass_metadata_key__", None
+ )
+
+ if sa_dataclass_metadata_key is None:
+
+ def local_attributes_for_class():
+ for name, obj in vars(cls).items():
+ yield name, obj, False
+
+ else:
+ field_names = set()
+
+ def local_attributes_for_class():
+ for field in util.local_dataclass_fields(cls):
+ if sa_dataclass_metadata_key in field.metadata:
+ field_names.add(field.name)
+ yield field.name, _as_dc_declaredattr(
+ field.metadata, sa_dataclass_metadata_key
+ ), True
+ for name, obj in vars(cls).items():
+ if name not in field_names:
+ yield name, obj, False
+
+ return local_attributes_for_class
+
+ def _scan_attributes(self):
+ cls = self.cls
+ dict_ = self.dict_
+ column_copies = self.column_copies
+ mapper_args_fn = None
+ table_args = inherited_table_args = None
+ tablename = None
+
+ attribute_is_overridden = self._cls_attr_override_checker(self.cls)
+
+ bases = []
+
+ for base in cls.__mro__:
+ # collect bases and make sure standalone columns are copied
+ # to be the column they will ultimately be on the class,
+ # so that declared_attr functions use the right columns.
+ # need to do this all the way up the hierarchy first
+ # (see #8190)
+
+ class_mapped = (
+ base is not cls
+ and _declared_mapping_info(base) is not None
+ and not _get_immediate_cls_attr(
+ base, "_sa_decl_prepare_nocascade", strict=True
+ )
+ )
+
+ local_attributes_for_class = self._cls_attr_resolver(base)
+
+ if not class_mapped and base is not cls:
+ locally_collected_columns = self._produce_column_copies(
+ local_attributes_for_class,
+ attribute_is_overridden,
+ )
+ else:
+ locally_collected_columns = {}
+
+ bases.append(
+ (
+ base,
+ class_mapped,
+ local_attributes_for_class,
+ locally_collected_columns,
+ )
+ )
+
+ for (
+ base,
+ class_mapped,
+ local_attributes_for_class,
+ locally_collected_columns,
+ ) in bases:
+
+ # this transfer can also take place as we scan each name
+ # for finer-grained control of how collected_attributes is
+ # populated, as this is what impacts column ordering.
+ # however it's simpler to get it out of the way here.
+ dict_.update(locally_collected_columns)
+
+ for name, obj, is_dataclass in local_attributes_for_class():
+ if name == "__mapper_args__":
+ check_decl = _check_declared_props_nocascade(
+ obj, name, cls
+ )
+ if not mapper_args_fn and (not class_mapped or check_decl):
+ # don't even invoke __mapper_args__ until
+ # after we've determined everything about the
+ # mapped table.
+ # make a copy of it so a class-level dictionary
+ # is not overwritten when we update column-based
+ # arguments.
+ def mapper_args_fn():
+ return dict(cls.__mapper_args__)
+
+ elif name == "__tablename__":
+ check_decl = _check_declared_props_nocascade(
+ obj, name, cls
+ )
+ if not tablename and (not class_mapped or check_decl):
+ tablename = cls.__tablename__
+ elif name == "__table_args__":
+ check_decl = _check_declared_props_nocascade(
+ obj, name, cls
+ )
+ if not table_args and (not class_mapped or check_decl):
+ table_args = cls.__table_args__
+ if not isinstance(
+ table_args, (tuple, dict, type(None))
+ ):
+ raise exc.ArgumentError(
+ "__table_args__ value must be a tuple, "
+ "dict, or None"
+ )
+ if base is not cls:
+ inherited_table_args = True
+ elif class_mapped:
+ if _is_declarative_props(obj):
+ util.warn(
+ "Regular (i.e. not __special__) "
+ "attribute '%s.%s' uses @declared_attr, "
+ "but owning class %s is mapped - "
+ "not applying to subclass %s."
+ % (base.__name__, name, base, cls)
+ )
+ continue
+ elif base is not cls:
+ # we're a mixin, abstract base, or something that is
+ # acting like that for now.
+ if isinstance(obj, Column):
+ # already copied columns to the mapped class.
+ continue
+ elif isinstance(obj, MapperProperty):
+ raise exc.InvalidRequestError(
+ "Mapper properties (i.e. deferred,"
+ "column_property(), relationship(), etc.) must "
+ "be declared as @declared_attr callables "
+ "on declarative mixin classes. For dataclass "
+ "field() objects, use a lambda:"
+ )
+ elif _is_declarative_props(obj):
+ if obj._cascading:
+ if name in dict_:
+ # unfortunately, while we can use the user-
+ # defined attribute here to allow a clean
+ # override, if there's another
+ # subclass below then it still tries to use
+ # this. not sure if there is enough
+ # information here to add this as a feature
+ # later on.
+ util.warn(
+ "Attribute '%s' on class %s cannot be "
+ "processed due to "
+ "@declared_attr.cascading; "
+ "skipping" % (name, cls)
+ )
+ dict_[name] = column_copies[
+ obj
+ ] = ret = obj.__get__(obj, cls)
+ setattr(cls, name, ret)
+ else:
+ if is_dataclass:
+ # access attribute using normal class access
+ # first, to see if it's been mapped on a
+ # superclass. note if the dataclasses.field()
+ # has "default", this value can be anything.
+ ret = getattr(cls, name, None)
+
+ # so, if it's anything that's not ORM
+ # mapped, assume we should invoke the
+ # declared_attr
+ if not isinstance(ret, InspectionAttr):
+ ret = obj.fget()
+ else:
+ # access attribute using normal class access.
+ # if the declared attr already took place
+ # on a superclass that is mapped, then
+ # this is no longer a declared_attr, it will
+ # be the InstrumentedAttribute
+ ret = getattr(cls, name)
+
+ # correct for proxies created from hybrid_property
+ # or similar. note there is no known case that
+ # produces nested proxies, so we are only
+ # looking one level deep right now.
+ if (
+ isinstance(ret, InspectionAttr)
+ and ret._is_internal_proxy
+ and not isinstance(
+ ret.original_property, MapperProperty
+ )
+ ):
+ ret = ret.descriptor
+
+ dict_[name] = column_copies[obj] = ret
+ if (
+ isinstance(ret, (Column, MapperProperty))
+ and ret.doc is None
+ ):
+ ret.doc = obj.__doc__
+ # here, the attribute is some other kind of property that
+ # we assume is not part of the declarative mapping.
+ # however, check for some more common mistakes
+ else:
+ self._warn_for_decl_attributes(base, name, obj)
+ elif is_dataclass and (
+ name not in dict_ or dict_[name] is not obj
+ ):
+ # here, we are definitely looking at the target class
+ # and not a superclass. this is currently a
+ # dataclass-only path. if the name is only
+ # a dataclass field and isn't in local cls.__dict__,
+ # put the object there.
+ # assert that the dataclass-enabled resolver agrees
+ # with what we are seeing
+
+ assert not attribute_is_overridden(name, obj)
+
+ if _is_declarative_props(obj):
+ obj = obj.fget()
+
+ dict_[name] = obj
+
+ if inherited_table_args and not tablename:
+ table_args = None
+
+ self.table_args = table_args
+ self.tablename = tablename
+ self.mapper_args_fn = mapper_args_fn
+
+ def _warn_for_decl_attributes(self, cls, key, c):
+ if isinstance(c, expression.ColumnClause):
+ util.warn(
+ "Attribute '%s' on class %s appears to be a non-schema "
+ "'sqlalchemy.sql.column()' "
+ "object; this won't be part of the declarative mapping"
+ % (key, cls)
+ )
+
+ def _produce_column_copies(
+ self, attributes_for_class, attribute_is_overridden
+ ):
+ cls = self.cls
+ dict_ = self.dict_
+ locally_collected_attributes = {}
+ column_copies = self.column_copies
+ # copy mixin columns to the mapped class
+
+ for name, obj, is_dataclass in attributes_for_class():
+ if isinstance(obj, Column):
+ if attribute_is_overridden(name, obj):
+ # if column has been overridden
+ # (like by the InstrumentedAttribute of the
+ # superclass), skip
+ continue
+ elif obj.foreign_keys:
+ raise exc.InvalidRequestError(
+ "Columns with foreign keys to other columns "
+ "must be declared as @declared_attr callables "
+ "on declarative mixin classes. For dataclass "
+ "field() objects, use a lambda:."
+ )
+ elif name not in dict_ and not (
+ "__table__" in dict_
+ and (obj.name or name) in dict_["__table__"].c
+ ):
+ column_copies[obj] = copy_ = obj._copy()
+ copy_._creation_order = obj._creation_order
+ setattr(cls, name, copy_)
+ locally_collected_attributes[name] = copy_
+ return locally_collected_attributes
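+
+ # editor's note: a sketch of the copy behavior above, assuming a
+ # typical mixin; each mapped class receives its own copy of the
+ # mixin's Column, since a Column object can belong to only one Table::
+ #
+ #     class TimestampMixin:
+ #         created_at = Column(DateTime)
+ #
+ #     class A(TimestampMixin, Base):
+ #         __tablename__ = "a"   # table "a" gets a copy of created_at
+ #         id = Column(Integer, primary_key=True)
+ #
+ #     class B(TimestampMixin, Base):
+ #         __tablename__ = "b"   # table "b" gets a distinct copy
+ #         id = Column(Integer, primary_key=True)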
+
+ def _extract_mappable_attributes(self):
+ cls = self.cls
+ dict_ = self.dict_
+
+ our_stuff = self.properties
+
+ late_mapped = _get_immediate_cls_attr(
+ cls, "_sa_decl_prepare_nocascade", strict=True
+ )
+
+ for k in list(dict_):
+
+ if k in ("__table__", "__tablename__", "__mapper_args__"):
+ continue
+
+ value = dict_[k]
+ if _is_declarative_props(value):
+ if value._cascading:
+ util.warn(
+ "Use of @declared_attr.cascading only applies to "
+ "Declarative 'mixin' and 'abstract' classes. "
+ "Currently, this flag is ignored on mapped class "
+ "%s" % self.cls
+ )
+
+ value = getattr(cls, k)
+
+ elif (
+ isinstance(value, QueryableAttribute)
+ and value.class_ is not cls
+ and value.key != k
+ ):
+ # detect a QueryableAttribute that's already mapped being
+ # assigned elsewhere in userland, turn into a synonym()
+ value = SynonymProperty(value.key)
+ setattr(cls, k, value)
+
+ if (
+ isinstance(value, tuple)
+ and len(value) == 1
+ and isinstance(value[0], (Column, MapperProperty))
+ ):
+ util.warn(
+ "Ignoring declarative-like tuple value of attribute "
+ "'%s': possibly a copy-and-paste error with a comma "
+ "accidentally placed at the end of the line?" % k
+ )
+ continue
+ elif not isinstance(value, (Column, MapperProperty)):
+ # using @declared_attr for some object that
+ # isn't Column/MapperProperty; remove from the dict_
+ # and place the evaluated value onto the class.
+ if not k.startswith("__"):
+ dict_.pop(k)
+ self._warn_for_decl_attributes(cls, k, value)
+ if not late_mapped:
+ setattr(cls, k, value)
+ continue
+ # we expect to see the name 'metadata' in some valid cases;
+ # however at this point we see it's assigned to something trying
+ # to be mapped, so raise for that.
+ elif k == "metadata":
+ raise exc.InvalidRequestError(
+ "Attribute name 'metadata' is reserved "
+ "for the MetaData instance when using a "
+ "declarative base class."
+ )
+ our_stuff[k] = value
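+
+ # editor's note: a sketch of the trailing-comma mistake guarded
+ # against above; the stray comma turns the Column into a one-element
+ # tuple that would otherwise be silently ignored::
+ #
+ #     class User(Base):
+ #         __tablename__ = "user"
+ #         id = Column(Integer, primary_key=True)
+ #         name = Column(String),   # <-- stray comma; warning emitted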
+
+ def _extract_declared_columns(self):
+ our_stuff = self.properties
+
+ # set up attributes in the order they were created
+ util.sort_dictionary(
+ our_stuff, key=lambda key: our_stuff[key]._creation_order
+ )
+
+ # extract columns from the class dict
+ declared_columns = self.declared_columns
+ name_to_prop_key = collections.defaultdict(set)
+ for key, c in list(our_stuff.items()):
+ if isinstance(c, (ColumnProperty, CompositeProperty)):
+ for col in c.columns:
+ if isinstance(col, Column) and col.table is None:
+ _undefer_column_name(key, col)
+ if not isinstance(c, CompositeProperty):
+ name_to_prop_key[col.name].add(key)
+ declared_columns.add(col)
+ elif isinstance(c, Column):
+ _undefer_column_name(key, c)
+ name_to_prop_key[c.name].add(key)
+ declared_columns.add(c)
+ # if the column is the same name as the key,
+ # remove it from the explicit properties dict.
+ # the normal rules for assigning column-based properties
+ # will take over, including precedence of columns
+ # in multi-column ColumnProperties.
+ if key == c.key:
+ del our_stuff[key]
+
+ for name, keys in name_to_prop_key.items():
+ if len(keys) > 1:
+ util.warn(
+ "On class %r, Column object %r named "
+ "directly multiple times, "
+ "only one will be used: %s. "
+ "Consider using orm.synonym instead"
+ % (self.classname, name, (", ".join(sorted(keys))))
+ )
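+
+ # editor's note: a sketch of the duplicate-name condition warned about
+ # above -- two attribute keys naming the same database column::
+ #
+ #     class User(Base):
+ #         __tablename__ = "user"
+ #         id = Column(Integer, primary_key=True)
+ #         name = Column("name", String)
+ #         name_alias = Column("name", String)  # only one will be used
+ #
+ # an attribute-level alias is spelled synonym("name") instead.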
+
+ def _setup_table(self, table=None):
+ cls = self.cls
+ tablename = self.tablename
+ table_args = self.table_args
+ dict_ = self.dict_
+ declared_columns = self.declared_columns
+
+ manager = attributes.manager_of_class(cls)
+
+ declared_columns = self.declared_columns = sorted(
+ declared_columns, key=lambda c: c._creation_order
+ )
+
+ if "__table__" not in dict_ and table is None:
+ if hasattr(cls, "__table_cls__"):
+ table_cls = util.unbound_method_to_callable(cls.__table_cls__)
+ else:
+ table_cls = Table
+
+ if tablename is not None:
+
+ args, table_kw = (), {}
+ if table_args:
+ if isinstance(table_args, dict):
+ table_kw = table_args
+ elif isinstance(table_args, tuple):
+ if isinstance(table_args[-1], dict):
+ args, table_kw = table_args[0:-1], table_args[-1]
+ else:
+ args = table_args
+
+ autoload_with = dict_.get("__autoload_with__")
+ if autoload_with:
+ table_kw["autoload_with"] = autoload_with
+
+ autoload = dict_.get("__autoload__")
+ if autoload:
+ table_kw["autoload"] = True
+
+ table = self.set_cls_attribute(
+ "__table__",
+ table_cls(
+ tablename,
+ self._metadata_for_cls(manager),
+ *(tuple(declared_columns) + tuple(args)),
+ **table_kw
+ ),
+ )
+ else:
+ if table is None:
+ table = cls.__table__
+ if declared_columns:
+ for c in declared_columns:
+ if not table.c.contains_column(c):
+ raise exc.ArgumentError(
+ "Can't add additional column %r when "
+ "specifying __table__" % c.key
+ )
+ self.local_table = table
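+
+ # editor's note: the __table_args__ forms consumed above, as an
+ # illustrative sketch -- a dict of keyword arguments, a tuple of
+ # positional arguments, or a tuple whose last element is the dict::
+ #
+ #     __table_args__ = {"mysql_engine": "InnoDB"}
+ #
+ #     __table_args__ = (UniqueConstraint("a", "b"),)
+ #
+ #     __table_args__ = (
+ #         UniqueConstraint("a", "b"),
+ #         {"mysql_engine": "InnoDB"},
+ #     )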
+
+ def _metadata_for_cls(self, manager):
+ if hasattr(self.cls, "metadata"):
+ return self.cls.metadata
+ else:
+ return manager.registry.metadata
+
+ def _setup_inheritance(self, mapper_kw):
+ table = self.local_table
+ cls = self.cls
+ table_args = self.table_args
+ declared_columns = self.declared_columns
+
+ inherits = mapper_kw.get("inherits", None)
+
+ if inherits is None:
+ # since we search for classical mappings now, search for
+ # multiple mapped bases as well and raise an error.
+ inherits_search = []
+ for c in cls.__bases__:
+ c = _resolve_for_abstract_or_classical(c)
+ if c is None:
+ continue
+ if _declared_mapping_info(
+ c
+ ) is not None and not _get_immediate_cls_attr(
+ c, "_sa_decl_prepare_nocascade", strict=True
+ ):
+ if c not in inherits_search:
+ inherits_search.append(c)
+
+ if inherits_search:
+ if len(inherits_search) > 1:
+ raise exc.InvalidRequestError(
+ "Class %s has multiple mapped bases: %r"
+ % (cls, inherits_search)
+ )
+ inherits = inherits_search[0]
+ elif isinstance(inherits, mapper):
+ inherits = inherits.class_
+
+ self.inherits = inherits
+
+ if (
+ table is None
+ and self.inherits is None
+ and not _get_immediate_cls_attr(cls, "__no_table__")
+ ):
+
+ raise exc.InvalidRequestError(
+ "Class %r does not have a __table__ or __tablename__ "
+ "specified and does not inherit from an existing "
+ "table-mapped class." % cls
+ )
+ elif self.inherits:
+ inherited_mapper = _declared_mapping_info(self.inherits)
+ inherited_table = inherited_mapper.local_table
+ inherited_persist_selectable = inherited_mapper.persist_selectable
+
+ if table is None:
+ # single table inheritance.
+ # ensure no table args
+ if table_args:
+ raise exc.ArgumentError(
+ "Can't place __table_args__ on an inherited class "
+ "with no table."
+ )
+ # add any columns declared here to the inherited table.
+ for c in declared_columns:
+ if c.name in inherited_table.c:
+ if inherited_table.c[c.name] is c:
+ continue
+ raise exc.ArgumentError(
+ "Column '%s' on class %s conflicts with "
+ "existing column '%s'"
+ % (c, cls, inherited_table.c[c.name])
+ )
+ if c.primary_key:
+ raise exc.ArgumentError(
+ "Can't place primary key columns on an inherited "
+ "class with no table."
+ )
+ inherited_table.append_column(c)
+ if (
+ inherited_persist_selectable is not None
+ and inherited_persist_selectable is not inherited_table
+ ):
+ inherited_persist_selectable._refresh_for_new_column(c)
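+
+ # editor's note: a sketch of the single table inheritance path above;
+ # a subclass that omits __tablename__ contributes its non-primary-key
+ # columns to the already-mapped parent table::
+ #
+ #     class Employee(Base):
+ #         __tablename__ = "employee"
+ #         id = Column(Integer, primary_key=True)
+ #         type = Column(String)
+ #
+ #     class Engineer(Employee):            # no __tablename__
+ #         engineer_info = Column(String)   # appended to "employee"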
+
+ def _prepare_mapper_arguments(self, mapper_kw):
+ properties = self.properties
+
+ if self.mapper_args_fn:
+ mapper_args = self.mapper_args_fn()
+ else:
+ mapper_args = {}
+
+ if mapper_kw:
+ mapper_args.update(mapper_kw)
+
+ if "properties" in mapper_args:
+ properties = dict(properties)
+ properties.update(mapper_args["properties"])
+
+ # make sure that column copies are used rather
+ # than the original columns from any mixins
+ for k in ("version_id_col", "polymorphic_on"):
+ if k in mapper_args:
+ v = mapper_args[k]
+ mapper_args[k] = self.column_copies.get(v, v)
+
+ if "inherits" in mapper_args:
+ inherits_arg = mapper_args["inherits"]
+ if isinstance(inherits_arg, mapper):
+ inherits_arg = inherits_arg.class_
+
+ if inherits_arg is not self.inherits:
+ raise exc.InvalidRequestError(
+ "mapper inherits argument given for non-inheriting "
+ "class %s" % (mapper_args["inherits"])
+ )
+
+ if self.inherits:
+ mapper_args["inherits"] = self.inherits
+
+ if self.inherits and not mapper_args.get("concrete", False):
+ # single or joined inheritance
+ # exclude any cols on the inherited table which are
+ # not mapped on the parent class, to avoid
+ # mapping columns specific to sibling/nephew classes
+ inherited_mapper = _declared_mapping_info(self.inherits)
+ inherited_table = inherited_mapper.local_table
+
+ if "exclude_properties" not in mapper_args:
+ mapper_args["exclude_properties"] = exclude_properties = set(
+ [
+ c.key
+ for c in inherited_table.c
+ if c not in inherited_mapper._columntoproperty
+ ]
+ ).union(inherited_mapper.exclude_properties or ())
+ exclude_properties.difference_update(
+ [c.key for c in self.declared_columns]
+ )
+
+ # look through columns in the current mapper that
+ # are keyed to a propname different than the colname
+ # (if names were the same, we'd have popped it out above,
+ # in which case the mapper makes this combination).
+ # See if the superclass has a similar column property.
+ # If so, join them together.
+ for k, col in list(properties.items()):
+ if not isinstance(col, expression.ColumnElement):
+ continue
+ if k in inherited_mapper._props:
+ p = inherited_mapper._props[k]
+ if isinstance(p, ColumnProperty):
+ # note here we place the subclass column
+ # first. See [ticket:1892] for background.
+ properties[k] = [col] + p.columns
+ result_mapper_args = mapper_args.copy()
+ result_mapper_args["properties"] = properties
+ self.mapper_args = result_mapper_args
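+
+ # editor's note: an illustrative sketch of why the column-copy
+ # substitution above matters -- a mixin-declared __mapper_args__ may
+ # refer to a mixin Column, and the mapping must use the copy placed
+ # on the mapped class, not the original::
+ #
+ #     class VersionedMixin:
+ #         version_id = Column(Integer)
+ #
+ #         @declared_attr
+ #         def __mapper_args__(cls):
+ #             return {"version_id_col": cls.version_id}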
+
+ def map(self, mapper_kw=util.EMPTY_DICT):
+ self._prepare_mapper_arguments(mapper_kw)
+ if hasattr(self.cls, "__mapper_cls__"):
+ mapper_cls = util.unbound_method_to_callable(
+ self.cls.__mapper_cls__
+ )
+ else:
+ mapper_cls = mapper
+
+ return self.set_cls_attribute(
+ "__mapper__",
+ mapper_cls(self.cls, self.local_table, **self.mapper_args),
+ )
+
+
+@util.preload_module("sqlalchemy.orm.decl_api")
+def _as_dc_declaredattr(field_metadata, sa_dataclass_metadata_key):
+ # wrap lambdas found inside dataclass fields in an ad-hoc declared_attr.
+ # we can't write the wrapped value back, because field.metadata is
+ # immutable :( so we have to go through extra trouble to compare these
+ decl_api = util.preloaded.orm_decl_api
+ obj = field_metadata[sa_dataclass_metadata_key]
+ if callable(obj) and not isinstance(obj, decl_api.declared_attr):
+ return decl_api.declared_attr(obj)
+ else:
+ return obj
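+
+
+# editor's note: an illustrative sketch of the lambda form referred to
+# above and in this module's error messages -- a dataclass field whose
+# column must be built lazily (here because of the ForeignKey), stored
+# under the configured sa_dataclass_metadata_key (commonly "sa")::
+#
+#     @dataclasses.dataclass
+#     class Address:
+#         user_id: int = dataclasses.field(
+#             init=False,
+#             metadata={"sa": lambda: Column(ForeignKey("user.id"))},
+#         )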
+
+
+class _DeferredMapperConfig(_ClassScanMapperConfig):
+ _configs = util.OrderedDict()
+
+ def _early_mapping(self, mapper_kw):
+ pass
+
+ @property
+ def cls(self):
+ return self._cls()
+
+ @cls.setter
+ def cls(self, class_):
+ self._cls = weakref.ref(class_, self._remove_config_cls)
+ self._configs[self._cls] = self
+
+ @classmethod
+ def _remove_config_cls(cls, ref):
+ cls._configs.pop(ref, None)
+
+ @classmethod
+ def has_cls(cls, class_):
+ # 2.6 fails on weakref if class_ is an old style class
+ return isinstance(class_, type) and weakref.ref(class_) in cls._configs
+
+ @classmethod
+ def raise_unmapped_for_cls(cls, class_):
+ if hasattr(class_, "_sa_raise_deferred_config"):
+ class_._sa_raise_deferred_config()
+
+ raise orm_exc.UnmappedClassError(
+ class_,
+ msg="Class %s has a deferred mapping on it. It is not yet "
+ "usable as a mapped class." % orm_exc._safe_cls_name(class_),
+ )
+
+ @classmethod
+ def config_for_cls(cls, class_):
+ return cls._configs[weakref.ref(class_)]
+
+ @classmethod
+ def classes_for_base(cls, base_cls, sort=True):
+ classes_for_base = [
+ m
+ for m, cls_ in [(m, m.cls) for m in cls._configs.values()]
+ if cls_ is not None and issubclass(cls_, base_cls)
+ ]
+
+ if not sort:
+ return classes_for_base
+
+ all_m_by_cls = dict((m.cls, m) for m in classes_for_base)
+
+ tuples = []
+ for m_cls in all_m_by_cls:
+ tuples.extend(
+ (all_m_by_cls[base_cls], all_m_by_cls[m_cls])
+ for base_cls in m_cls.__bases__
+ if base_cls in all_m_by_cls
+ )
+ return list(topological.sort(tuples, classes_for_base))
+
+ def map(self, mapper_kw=util.EMPTY_DICT):
+ self._configs.pop(self._cls, None)
+ return super(_DeferredMapperConfig, self).map(mapper_kw)
+
+
+def _add_attribute(cls, key, value):
+ """add an attribute to an existing declarative class.
+
+ This runs through the logic to determine MapperProperty,
+ adds it to the Mapper, adds a column to the mapped Table, etc.
+
+ """
+
+ if "__mapper__" in cls.__dict__:
+ if isinstance(value, Column):
+ _undefer_column_name(key, value)
+ cls.__table__.append_column(value, replace_existing=True)
+ cls.__mapper__.add_property(key, value)
+ elif isinstance(value, ColumnProperty):
+ for col in value.columns:
+ if isinstance(col, Column) and col.table is None:
+ _undefer_column_name(key, col)
+ cls.__table__.append_column(col, replace_existing=True)
+ cls.__mapper__.add_property(key, value)
+ elif isinstance(value, MapperProperty):
+ cls.__mapper__.add_property(key, value)
+ elif isinstance(value, QueryableAttribute) and value.key != key:
+ # detect a QueryableAttribute that's already mapped being
+ # assigned elsewhere in userland, turn into a synonym()
+ value = SynonymProperty(value.key)
+ cls.__mapper__.add_property(key, value)
+ else:
+ type.__setattr__(cls, key, value)
+ cls.__mapper__._expire_memoizations()
+ else:
+ type.__setattr__(cls, key, value)
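+
+
+# editor's note: a sketch of the runtime assignment path handled above;
+# declarative classes route setattr here, so new elements join both the
+# Table and the Mapper (names are hypothetical)::
+#
+#     User.fullname = Column(String)            # appended to the table
+#     User.addresses = relationship("Address")  # added to the mapper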
+
+
+def _del_attribute(cls, key):
+
+ if (
+ "__mapper__" in cls.__dict__
+ and key in cls.__dict__
+ and not cls.__mapper__._dispose_called
+ ):
+ value = cls.__dict__[key]
+ if isinstance(
+ value, (Column, ColumnProperty, MapperProperty, QueryableAttribute)
+ ):
+ raise NotImplementedError(
+ "Can't un-map individual mapped attributes on a mapped class."
+ )
+ else:
+ type.__delattr__(cls, key)
+ cls.__mapper__._expire_memoizations()
+ else:
+ type.__delattr__(cls, key)
+
+
+def _declarative_constructor(self, **kwargs):
+ """A simple constructor that allows initialization from kwargs.
+
+ Sets attributes on the constructed instance using the names and
+ values in ``kwargs``.
+
+ Only keys that are present as
+ attributes of the instance's class are allowed. These could be,
+ for example, any mapped columns or relationships.
+ """
+ cls_ = type(self)
+ for k in kwargs:
+ if not hasattr(cls_, k):
+ raise TypeError(
+ "%r is an invalid keyword argument for %s" % (k, cls_.__name__)
+ )
+ setattr(self, k, kwargs[k])
+
+
+_declarative_constructor.__name__ = "__init__"
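+
+
+# editor's note: a sketch of the default constructor's behavior,
+# assuming a mapped class User with a "name" column::
+#
+#     User(name="ed")       # sets .name
+#     User(nickname="ed")   # TypeError: invalid keyword argument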
+
+
+def _undefer_column_name(key, column):
+ if column.key is None:
+ column.key = key
+ if column.name is None:
+ column.name = key
diff --git a/lib/sqlalchemy/orm/dependency.py b/lib/sqlalchemy/orm/dependency.py
new file mode 100644
index 0000000..1b5be9a
--- /dev/null
+++ b/lib/sqlalchemy/orm/dependency.py
@@ -0,0 +1,1290 @@
+# orm/dependency.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""Relationship dependencies.
+
+"""
+
+from . import attributes
+from . import exc
+from . import sync
+from . import unitofwork
+from . import util as mapperutil
+from .interfaces import MANYTOMANY
+from .interfaces import MANYTOONE
+from .interfaces import ONETOMANY
+from .. import exc as sa_exc
+from .. import sql
+from .. import util
+
+
+class DependencyProcessor(object):
+ def __init__(self, prop):
+ self.prop = prop
+ self.cascade = prop.cascade
+ self.mapper = prop.mapper
+ self.parent = prop.parent
+ self.secondary = prop.secondary
+ self.direction = prop.direction
+ self.post_update = prop.post_update
+ self.passive_deletes = prop.passive_deletes
+ self.passive_updates = prop.passive_updates
+ self.enable_typechecks = prop.enable_typechecks
+ if self.passive_deletes:
+ self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE
+ else:
+ self._passive_delete_flag = attributes.PASSIVE_OFF
+ if self.passive_updates:
+ self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE
+ else:
+ self._passive_update_flag = attributes.PASSIVE_OFF
+
+ self.sort_key = "%s_%s" % (self.parent._sort_key, prop.key)
+ self.key = prop.key
+ if not self.prop.synchronize_pairs:
+ raise sa_exc.ArgumentError(
+ "Can't build a DependencyProcessor for relationship %s. "
+ "No target attributes to populate between parent and "
+ "child are present" % self.prop
+ )
+
+ @classmethod
+ def from_relationship(cls, prop):
+ return _direction_to_processor[prop.direction](prop)
+
+ def hasparent(self, state):
+ """return True if the given object instance has a parent,
+ according to the ``InstrumentedAttribute`` handled by this
+ ``DependencyProcessor``.
+
+ """
+ return self.parent.class_manager.get_impl(self.key).hasparent(state)
+
+ def per_property_preprocessors(self, uow):
+ """establish actions and dependencies related to a flush.
+
+ These actions will operate on all relevant states in
+ the aggregate.
+
+ """
+ uow.register_preprocessor(self, True)
+
+ def per_property_flush_actions(self, uow):
+ after_save = unitofwork.ProcessAll(uow, self, False, True)
+ before_delete = unitofwork.ProcessAll(uow, self, True, True)
+
+ parent_saves = unitofwork.SaveUpdateAll(
+ uow, self.parent.primary_base_mapper
+ )
+ child_saves = unitofwork.SaveUpdateAll(
+ uow, self.mapper.primary_base_mapper
+ )
+
+ parent_deletes = unitofwork.DeleteAll(
+ uow, self.parent.primary_base_mapper
+ )
+ child_deletes = unitofwork.DeleteAll(
+ uow, self.mapper.primary_base_mapper
+ )
+
+ self.per_property_dependencies(
+ uow,
+ parent_saves,
+ child_saves,
+ parent_deletes,
+ child_deletes,
+ after_save,
+ before_delete,
+ )
+
+ def per_state_flush_actions(self, uow, states, isdelete):
+ """establish actions and dependencies related to a flush.
+
+ These actions will operate on all relevant states
+ individually. This occurs only if there are cycles
+ in the 'aggregated' version of events.
+
+ """
+
+ child_base_mapper = self.mapper.primary_base_mapper
+ child_saves = unitofwork.SaveUpdateAll(uow, child_base_mapper)
+ child_deletes = unitofwork.DeleteAll(uow, child_base_mapper)
+
+ # locate and disable the aggregate processors
+ # for this dependency
+
+ if isdelete:
+ before_delete = unitofwork.ProcessAll(uow, self, True, True)
+ before_delete.disabled = True
+ else:
+ after_save = unitofwork.ProcessAll(uow, self, False, True)
+ after_save.disabled = True
+
+ # check if the "child" side is part of the cycle
+
+ if child_saves not in uow.cycles:
+ # based on the current dependencies we use, the saves/
+ # deletes should always be in the 'cycles' collection
+ # together. if this changes, we will have to break up
+ # this method a bit more.
+ assert child_deletes not in uow.cycles
+
+ # child side is not part of the cycle, so we will link per-state
+ # actions to the aggregate "saves", "deletes" actions
+ child_actions = [(child_saves, False), (child_deletes, True)]
+ child_in_cycles = False
+ else:
+ child_in_cycles = True
+
+ # check if the "parent" side is part of the cycle
+ if not isdelete:
+ parent_saves = unitofwork.SaveUpdateAll(
+ uow, self.parent.base_mapper
+ )
+ parent_deletes = before_delete = None
+ if parent_saves in uow.cycles:
+ parent_in_cycles = True
+ else:
+ parent_deletes = unitofwork.DeleteAll(uow, self.parent.base_mapper)
+ parent_saves = after_save = None
+ if parent_deletes in uow.cycles:
+ parent_in_cycles = True
+
+ # now create actions / dependencies for each state.
+
+ for state in states:
+ # detect if there's anything changed or loaded
+ # by a preprocessor on this state/attribute. In the
+ # case of deletes we may try to load missing items here as well.
+ sum_ = state.manager[self.key].impl.get_all_pending(
+ state,
+ state.dict,
+ self._passive_delete_flag
+ if isdelete
+ else attributes.PASSIVE_NO_INITIALIZE,
+ )
+
+ if not sum_:
+ continue
+
+ if isdelete:
+ before_delete = unitofwork.ProcessState(uow, self, True, state)
+ if parent_in_cycles:
+ parent_deletes = unitofwork.DeleteState(uow, state)
+ else:
+ after_save = unitofwork.ProcessState(uow, self, False, state)
+ if parent_in_cycles:
+ parent_saves = unitofwork.SaveUpdateState(uow, state)
+
+ if child_in_cycles:
+ child_actions = []
+ for child_state, child in sum_:
+ if child_state not in uow.states:
+ child_action = (None, None)
+ else:
+ (deleted, listonly) = uow.states[child_state]
+ if deleted:
+ child_action = (
+ unitofwork.DeleteState(uow, child_state),
+ True,
+ )
+ else:
+ child_action = (
+ unitofwork.SaveUpdateState(uow, child_state),
+ False,
+ )
+ child_actions.append(child_action)
+
+ # establish dependencies between our possibly per-state
+ # parent action and our possibly per-state child action.
+ for child_action, childisdelete in child_actions:
+ self.per_state_dependencies(
+ uow,
+ parent_saves,
+ parent_deletes,
+ child_action,
+ after_save,
+ before_delete,
+ isdelete,
+ childisdelete,
+ )
+
+ def presort_deletes(self, uowcommit, states):
+ return False
+
+ def presort_saves(self, uowcommit, states):
+ return False
+
+ def process_deletes(self, uowcommit, states):
+ pass
+
+ def process_saves(self, uowcommit, states):
+ pass
+
+ def prop_has_changes(self, uowcommit, states, isdelete):
+ if not isdelete or self.passive_deletes:
+ passive = attributes.PASSIVE_NO_INITIALIZE
+ elif self.direction is MANYTOONE:
+ # here, we were hoping to avoid having to fetch the many-to-one
+ # history and just ignore it, if there are no further cascades
+ # to take place. however there are too many less common conditions
+ # that still apply, and tests in test_relationships /
+ # test_cascade etc. will still fail.
+ passive = attributes.PASSIVE_NO_FETCH_RELATED
+ else:
+ passive = attributes.PASSIVE_OFF
+
+ for s in states:
+ # TODO: add a high speed method
+ # to InstanceState which returns: attribute
+ # has a non-None value, or had one
+ history = uowcommit.get_attribute_history(s, self.key, passive)
+ if history and not history.empty():
+ return True
+ else:
+ return (
+ states
+ and not self.prop._is_self_referential
+ and self.mapper in uowcommit.mappers
+ )
+
+ def _verify_canload(self, state):
+ if self.prop.uselist and state is None:
+ raise exc.FlushError(
+ "Can't flush None value found in "
+ "collection %s" % (self.prop,)
+ )
+ elif state is not None and not self.mapper._canload(
+ state, allow_subtypes=not self.enable_typechecks
+ ):
+ if self.mapper._canload(state, allow_subtypes=True):
+ raise exc.FlushError(
+ "Attempting to flush an item of type "
+ "%(x)s as a member of collection "
+ '"%(y)s". Expected an object of type '
+ "%(z)s or a polymorphic subclass of "
+ "this type. If %(x)s is a subclass of "
+ '%(z)s, configure mapper "%(zm)s" to '
+ "load this subtype polymorphically, or "
+ "set enable_typechecks=False to allow "
+ "any subtype to be accepted for flush. "
+ % {
+ "x": state.class_,
+ "y": self.prop,
+ "z": self.mapper.class_,
+ "zm": self.mapper,
+ }
+ )
+ else:
+ raise exc.FlushError(
+ "Attempting to flush an item of type "
+ "%(x)s as a member of collection "
+ '"%(y)s". Expected an object of type '
+ "%(z)s or a polymorphic subclass of "
+ "this type."
+ % {
+ "x": state.class_,
+ "y": self.prop,
+ "z": self.mapper.class_,
+ }
+ )
+
+ def _synchronize(self, state, child, associationrow, clearkeys, uowcommit):
+ raise NotImplementedError()
+
+ def _get_reversed_processed_set(self, uow):
+ if not self.prop._reverse_property:
+ return None
+
+ process_key = tuple(
+ sorted([self.key] + [p.key for p in self.prop._reverse_property])
+ )
+ return uow.memo(("reverse_key", process_key), set)
+
+ def _post_update(self, state, uowcommit, related, is_m2o_delete=False):
+ for x in related:
+ if not is_m2o_delete or x is not None:
+ uowcommit.register_post_update(
+ state, [r for l, r in self.prop.synchronize_pairs]
+ )
+ break
+
+ def _pks_changed(self, uowcommit, state):
+ raise NotImplementedError()
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, self.prop)
+
+
+class OneToManyDP(DependencyProcessor):
+ def per_property_dependencies(
+ self,
+ uow,
+ parent_saves,
+ child_saves,
+ parent_deletes,
+ child_deletes,
+ after_save,
+ before_delete,
+ ):
+ if self.post_update:
+ child_post_updates = unitofwork.PostUpdateAll(
+ uow, self.mapper.primary_base_mapper, False
+ )
+ child_pre_updates = unitofwork.PostUpdateAll(
+ uow, self.mapper.primary_base_mapper, True
+ )
+
+ uow.dependencies.update(
+ [
+ (child_saves, after_save),
+ (parent_saves, after_save),
+ (after_save, child_post_updates),
+ (before_delete, child_pre_updates),
+ (child_pre_updates, parent_deletes),
+ (child_pre_updates, child_deletes),
+ ]
+ )
+ else:
+ uow.dependencies.update(
+ [
+ (parent_saves, after_save),
+ (after_save, child_saves),
+ (after_save, child_deletes),
+ (child_saves, parent_deletes),
+ (child_deletes, parent_deletes),
+ (before_delete, child_saves),
+ (before_delete, child_deletes),
+ ]
+ )
+
+ def per_state_dependencies(
+ self,
+ uow,
+ save_parent,
+ delete_parent,
+ child_action,
+ after_save,
+ before_delete,
+ isdelete,
+ childisdelete,
+ ):
+
+ if self.post_update:
+
+ child_post_updates = unitofwork.PostUpdateAll(
+ uow, self.mapper.primary_base_mapper, False
+ )
+ child_pre_updates = unitofwork.PostUpdateAll(
+ uow, self.mapper.primary_base_mapper, True
+ )
+
+ # TODO: this whole block is not covered
+ # by any tests
+ if not isdelete:
+ if childisdelete:
+ uow.dependencies.update(
+ [
+ (child_action, after_save),
+ (after_save, child_post_updates),
+ ]
+ )
+ else:
+ uow.dependencies.update(
+ [
+ (save_parent, after_save),
+ (child_action, after_save),
+ (after_save, child_post_updates),
+ ]
+ )
+ else:
+ if childisdelete:
+ uow.dependencies.update(
+ [
+ (before_delete, child_pre_updates),
+ (child_pre_updates, delete_parent),
+ ]
+ )
+ else:
+ uow.dependencies.update(
+ [
+ (before_delete, child_pre_updates),
+ (child_pre_updates, delete_parent),
+ ]
+ )
+ elif not isdelete:
+ uow.dependencies.update(
+ [
+ (save_parent, after_save),
+ (after_save, child_action),
+ (save_parent, child_action),
+ ]
+ )
+ else:
+ uow.dependencies.update(
+ [(before_delete, child_action), (child_action, delete_parent)]
+ )
+
+ def presort_deletes(self, uowcommit, states):
+ # head object is being deleted, and we manage its list of
+ # child objects; the child objects have to have their
+ # foreign key to the parent set to NULL
+ should_null_fks = (
+ not self.cascade.delete and not self.passive_deletes == "all"
+ )
+
+ for state in states:
+ history = uowcommit.get_attribute_history(
+ state, self.key, self._passive_delete_flag
+ )
+ if history:
+ for child in history.deleted:
+ if child is not None and self.hasparent(child) is False:
+ if self.cascade.delete_orphan:
+ uowcommit.register_object(child, isdelete=True)
+ else:
+ uowcommit.register_object(child)
+
+ if should_null_fks:
+ for child in history.unchanged:
+ if child is not None:
+ uowcommit.register_object(
+ child, operation="delete", prop=self.prop
+ )
+
+ def presort_saves(self, uowcommit, states):
+ children_added = uowcommit.memo(("children_added", self), set)
+
+ should_null_fks = (
+ not self.cascade.delete_orphan
+ and not self.passive_deletes == "all"
+ )
+
+ for state in states:
+ pks_changed = self._pks_changed(uowcommit, state)
+
+ if not pks_changed or self.passive_updates:
+ passive = attributes.PASSIVE_NO_INITIALIZE
+ else:
+ passive = attributes.PASSIVE_OFF
+
+ history = uowcommit.get_attribute_history(state, self.key, passive)
+ if history:
+ for child in history.added:
+ if child is not None:
+ uowcommit.register_object(
+ child,
+ cancel_delete=True,
+ operation="add",
+ prop=self.prop,
+ )
+
+ children_added.update(history.added)
+
+ for child in history.deleted:
+ if not self.cascade.delete_orphan:
+ if should_null_fks:
+ uowcommit.register_object(
+ child,
+ isdelete=False,
+ operation="delete",
+ prop=self.prop,
+ )
+ elif self.hasparent(child) is False:
+ uowcommit.register_object(
+ child,
+ isdelete=True,
+ operation="delete",
+ prop=self.prop,
+ )
+ for c, m, st_, dct_ in self.mapper.cascade_iterator(
+ "delete", child
+ ):
+ uowcommit.register_object(st_, isdelete=True)
+
+ if pks_changed:
+ if history:
+ for child in history.unchanged:
+ if child is not None:
+ uowcommit.register_object(
+ child,
+ False,
+ self.passive_updates,
+ operation="pk change",
+ prop=self.prop,
+ )
+
+ def process_deletes(self, uowcommit, states):
+ # head object is being deleted, and we manage its list of
+ # child objects; the child objects have to have their foreign
+ # key to the parent set to NULL. this phase can be called
+ # safely for any cascade but is unnecessary if delete cascade
+ # is on.
+
+ if self.post_update or not self.passive_deletes == "all":
+ children_added = uowcommit.memo(("children_added", self), set)
+
+ for state in states:
+ history = uowcommit.get_attribute_history(
+ state, self.key, self._passive_delete_flag
+ )
+ if history:
+ for child in history.deleted:
+ if (
+ child is not None
+ and self.hasparent(child) is False
+ ):
+ self._synchronize(
+ state, child, None, True, uowcommit, False
+ )
+ if self.post_update and child:
+ self._post_update(child, uowcommit, [state])
+
+ if self.post_update or not self.cascade.delete:
+ for child in set(history.unchanged).difference(
+ children_added
+ ):
+ if child is not None:
+ self._synchronize(
+ state, child, None, True, uowcommit, False
+ )
+ if self.post_update and child:
+ self._post_update(
+ child, uowcommit, [state]
+ )
+
+ # technically, we can even remove each child from the
+ # collection here too. but this would be a somewhat
+ # inconsistent behavior since it wouldn't happen
+ # if the old parent wasn't deleted but the child was moved.
+
+ def process_saves(self, uowcommit, states):
+ should_null_fks = (
+ not self.cascade.delete_orphan
+ and not self.passive_deletes == "all"
+ )
+
+ for state in states:
+ history = uowcommit.get_attribute_history(
+ state, self.key, attributes.PASSIVE_NO_INITIALIZE
+ )
+ if history:
+ for child in history.added:
+ self._synchronize(
+ state, child, None, False, uowcommit, False
+ )
+ if child is not None and self.post_update:
+ self._post_update(child, uowcommit, [state])
+
+ for child in history.deleted:
+ if (
+ should_null_fks
+ and not self.cascade.delete_orphan
+ and not self.hasparent(child)
+ ):
+ self._synchronize(
+ state, child, None, True, uowcommit, False
+ )
+
+ if self._pks_changed(uowcommit, state):
+ for child in history.unchanged:
+ self._synchronize(
+ state, child, None, False, uowcommit, True
+ )
+
+ def _synchronize(
+ self, state, child, associationrow, clearkeys, uowcommit, pks_changed
+ ):
+ source = state
+ dest = child
+ self._verify_canload(child)
+ if dest is None or (
+ not self.post_update and uowcommit.is_deleted(dest)
+ ):
+ return
+ if clearkeys:
+ sync.clear(dest, self.mapper, self.prop.synchronize_pairs)
+ else:
+ sync.populate(
+ source,
+ self.parent,
+ dest,
+ self.mapper,
+ self.prop.synchronize_pairs,
+ uowcommit,
+ self.passive_updates and pks_changed,
+ )
+
+ def _pks_changed(self, uowcommit, state):
+ return sync.source_modified(
+ uowcommit, state, self.parent, self.prop.synchronize_pairs
+ )
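+
+
+# editor's note: a sketch of the one-to-many flush rules implemented
+# above, assuming User.addresses = relationship("Address") with no
+# delete cascade configured::
+#
+#     session.delete(some_user)
+#     session.flush()
+#     # child rows are disassociated:
+#     # UPDATE address SET user_id=NULL WHERE ...
+#
+# with a delete cascade in place, the child rows are instead deleted.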
+
+
+class ManyToOneDP(DependencyProcessor):
+ def __init__(self, prop):
+ DependencyProcessor.__init__(self, prop)
+ for mapper in self.mapper.self_and_descendants:
+ mapper._dependency_processors.append(DetectKeySwitch(prop))
+
+ def per_property_dependencies(
+ self,
+ uow,
+ parent_saves,
+ child_saves,
+ parent_deletes,
+ child_deletes,
+ after_save,
+ before_delete,
+ ):
+
+ if self.post_update:
+ parent_post_updates = unitofwork.PostUpdateAll(
+ uow, self.parent.primary_base_mapper, False
+ )
+ parent_pre_updates = unitofwork.PostUpdateAll(
+ uow, self.parent.primary_base_mapper, True
+ )
+
+ uow.dependencies.update(
+ [
+ (child_saves, after_save),
+ (parent_saves, after_save),
+ (after_save, parent_post_updates),
+ (after_save, parent_pre_updates),
+ (before_delete, parent_pre_updates),
+ (parent_pre_updates, child_deletes),
+ (parent_pre_updates, parent_deletes),
+ ]
+ )
+ else:
+ uow.dependencies.update(
+ [
+ (child_saves, after_save),
+ (after_save, parent_saves),
+ (parent_saves, child_deletes),
+ (parent_deletes, child_deletes),
+ ]
+ )
+
+ def per_state_dependencies(
+ self,
+ uow,
+ save_parent,
+ delete_parent,
+ child_action,
+ after_save,
+ before_delete,
+ isdelete,
+ childisdelete,
+ ):
+
+ if self.post_update:
+
+ if not isdelete:
+ parent_post_updates = unitofwork.PostUpdateAll(
+ uow, self.parent.primary_base_mapper, False
+ )
+ if childisdelete:
+ uow.dependencies.update(
+ [
+ (after_save, parent_post_updates),
+ (parent_post_updates, child_action),
+ ]
+ )
+ else:
+ uow.dependencies.update(
+ [
+ (save_parent, after_save),
+ (child_action, after_save),
+ (after_save, parent_post_updates),
+ ]
+ )
+ else:
+ parent_pre_updates = unitofwork.PostUpdateAll(
+ uow, self.parent.primary_base_mapper, True
+ )
+
+ uow.dependencies.update(
+ [
+ (before_delete, parent_pre_updates),
+ (parent_pre_updates, delete_parent),
+ (parent_pre_updates, child_action),
+ ]
+ )
+
+ elif not isdelete:
+ if not childisdelete:
+ uow.dependencies.update(
+ [(child_action, after_save), (after_save, save_parent)]
+ )
+ else:
+ uow.dependencies.update([(after_save, save_parent)])
+
+ else:
+ if childisdelete:
+ uow.dependencies.update([(delete_parent, child_action)])
+
+ def presort_deletes(self, uowcommit, states):
+ if self.cascade.delete or self.cascade.delete_orphan:
+ for state in states:
+ history = uowcommit.get_attribute_history(
+ state, self.key, self._passive_delete_flag
+ )
+ if history:
+ if self.cascade.delete_orphan:
+ todelete = history.sum()
+ else:
+ todelete = history.non_deleted()
+ for child in todelete:
+ if child is None:
+ continue
+ uowcommit.register_object(
+ child,
+ isdelete=True,
+ operation="delete",
+ prop=self.prop,
+ )
+ t = self.mapper.cascade_iterator("delete", child)
+ for c, m, st_, dct_ in t:
+ uowcommit.register_object(st_, isdelete=True)
+
+ def presort_saves(self, uowcommit, states):
+ for state in states:
+ uowcommit.register_object(state, operation="add", prop=self.prop)
+ if self.cascade.delete_orphan:
+ history = uowcommit.get_attribute_history(
+ state, self.key, self._passive_delete_flag
+ )
+ if history:
+ for child in history.deleted:
+ if self.hasparent(child) is False:
+ uowcommit.register_object(
+ child,
+ isdelete=True,
+ operation="delete",
+ prop=self.prop,
+ )
+
+ t = self.mapper.cascade_iterator("delete", child)
+ for c, m, st_, dct_ in t:
+ uowcommit.register_object(st_, isdelete=True)
+
+ def process_deletes(self, uowcommit, states):
+ if (
+ self.post_update
+ and not self.cascade.delete_orphan
+ and not self.passive_deletes == "all"
+ ):
+
+ # post_update means we have to update our
+ # row to not reference the child object
+ # before we can DELETE the row
+ for state in states:
+ self._synchronize(state, None, None, True, uowcommit)
+ if state and self.post_update:
+ history = uowcommit.get_attribute_history(
+ state, self.key, self._passive_delete_flag
+ )
+ if history:
+ self._post_update(
+ state, uowcommit, history.sum(), is_m2o_delete=True
+ )
+
+ def process_saves(self, uowcommit, states):
+ for state in states:
+ history = uowcommit.get_attribute_history(
+ state, self.key, attributes.PASSIVE_NO_INITIALIZE
+ )
+ if history:
+ if history.added:
+ for child in history.added:
+ self._synchronize(
+ state, child, None, False, uowcommit, "add"
+ )
+ elif history.deleted:
+ self._synchronize(
+ state, None, None, True, uowcommit, "delete"
+ )
+ if self.post_update:
+ self._post_update(state, uowcommit, history.sum())
+
+ def _synchronize(
+ self,
+ state,
+ child,
+ associationrow,
+ clearkeys,
+ uowcommit,
+ operation=None,
+ ):
+ if state is None or (
+ not self.post_update and uowcommit.is_deleted(state)
+ ):
+ return
+
+ if (
+ operation is not None
+ and child is not None
+ and not uowcommit.session._contains_state(child)
+ ):
+ util.warn(
+ "Object of type %s not in session, %s "
+ "operation along '%s' won't proceed"
+ % (mapperutil.state_class_str(child), operation, self.prop)
+ )
+ return
+
+ if clearkeys or child is None:
+ sync.clear(state, self.parent, self.prop.synchronize_pairs)
+ else:
+ self._verify_canload(child)
+ sync.populate(
+ child,
+ self.mapper,
+ state,
+ self.parent,
+ self.prop.synchronize_pairs,
+ uowcommit,
+ False,
+ )
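+
+
+# editor's note: an illustrative sketch of the post_update handling
+# above -- mutually-dependent rows are broken apart by emitting a
+# second UPDATE after the INSERT (and before the DELETE), rather than
+# trying to set both foreign keys in a single round::
+#
+#     class Widget(Base):
+#         __tablename__ = "widget"
+#         id = Column(Integer, primary_key=True)
+#         favorite_entry_id = Column(ForeignKey("entry.id"))
+#         favorite_entry = relationship(
+#             "Entry", foreign_keys=favorite_entry_id, post_update=True
+#         )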
+
+
+class DetectKeySwitch(DependencyProcessor):
+ """For many-to-one relationships with no one-to-many backref,
+ searches for parents through the unit of work when a primary
+ key has changed and updates them.
+
+ Theoretically, this approach could be expanded to support transparent
+ deletion of objects referenced via many-to-one as well, although
+ the current attribute system doesn't do enough bookkeeping for this
+ to be efficient.
+
+ """
+
+ def per_property_preprocessors(self, uow):
+ if self.prop._reverse_property:
+ if self.passive_updates:
+ return
+ else:
+ if False in (
+ prop.passive_updates
+ for prop in self.prop._reverse_property
+ ):
+ return
+
+ uow.register_preprocessor(self, False)
+
+ def per_property_flush_actions(self, uow):
+ parent_saves = unitofwork.SaveUpdateAll(uow, self.parent.base_mapper)
+ after_save = unitofwork.ProcessAll(uow, self, False, False)
+ uow.dependencies.update([(parent_saves, after_save)])
+
+ def per_state_flush_actions(self, uow, states, isdelete):
+ pass
+
+ def presort_deletes(self, uowcommit, states):
+ pass
+
+ def presort_saves(self, uow, states):
+ if not self.passive_updates:
+ # for non-passive updates, register in the preprocess stage
+ # so that mapper save_obj() gets a hold of changes
+ self._process_key_switches(states, uow)
+
+ def prop_has_changes(self, uow, states, isdelete):
+ if not isdelete and self.passive_updates:
+ d = self._key_switchers(uow, states)
+ return bool(d)
+
+ return False
+
+ def process_deletes(self, uowcommit, states):
+ assert False
+
+ def process_saves(self, uowcommit, states):
+ # for passive updates, register objects in the process stage
+ # so that we avoid ManyToOneDP's registering the object without
+ # the listonly flag in its own preprocess stage (which results
+ # in UPDATE statements being emitted)
+ assert self.passive_updates
+ self._process_key_switches(states, uowcommit)
+
+ def _key_switchers(self, uow, states):
+ switched, notswitched = uow.memo(
+ ("pk_switchers", self), lambda: (set(), set())
+ )
+
+ allstates = switched.union(notswitched)
+ for s in states:
+ if s not in allstates:
+ if self._pks_changed(uow, s):
+ switched.add(s)
+ else:
+ notswitched.add(s)
+ return switched
+
+ def _process_key_switches(self, deplist, uowcommit):
+ switchers = self._key_switchers(uowcommit, deplist)
+ if switchers:
+ # if primary key values have actually changed somewhere, perform
+ # a linear search through the UOW in search of a parent.
+ for state in uowcommit.session.identity_map.all_states():
+ if not issubclass(state.class_, self.parent.class_):
+ continue
+ dict_ = state.dict
+ related = state.get_impl(self.key).get(
+ state, dict_, passive=self._passive_update_flag
+ )
+ if (
+ related is not attributes.PASSIVE_NO_RESULT
+ and related is not None
+ ):
+ if self.prop.uselist:
+ if not related:
+ continue
+ related_obj = related[0]
+ else:
+ related_obj = related
+ related_state = attributes.instance_state(related_obj)
+ if related_state in switchers:
+ uowcommit.register_object(
+ state, False, self.passive_updates
+ )
+ sync.populate(
+ related_state,
+ self.mapper,
+ state,
+ self.parent,
+ self.prop.synchronize_pairs,
+ uowcommit,
+ self.passive_updates,
+ )
+
+ def _pks_changed(self, uowcommit, state):
+ return bool(state.key) and sync.source_modified(
+ uowcommit, state, self.mapper, self.prop.synchronize_pairs
+ )
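+
+
+# editor's note: a sketch of the scenario DetectKeySwitch handles,
+# assuming a mutable natural primary key and passive_updates=False on
+# the many-to-one side::
+#
+#     user = session.get(User, "jack")
+#     user.username = "ed"    # primary key value changes
+#     session.flush()
+#     # in-session objects whose many-to-one points at "jack" get an
+#     # UPDATE of their foreign key, found via the identity map scan
+#     # in _process_key_switches()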
+
+
+class ManyToManyDP(DependencyProcessor):
+ def per_property_dependencies(
+ self,
+ uow,
+ parent_saves,
+ child_saves,
+ parent_deletes,
+ child_deletes,
+ after_save,
+ before_delete,
+ ):
+
+ uow.dependencies.update(
+ [
+ (parent_saves, after_save),
+ (child_saves, after_save),
+ (after_save, child_deletes),
+ # a rowswitch on the parent from deleted to saved
+ # can make this one occur, as the "save" may remove
+ # an element from the
+ # "deleted" list before we have a chance to
+ # process its child rows
+ (before_delete, parent_saves),
+ (before_delete, parent_deletes),
+ (before_delete, child_deletes),
+ (before_delete, child_saves),
+ ]
+ )
+
+ def per_state_dependencies(
+ self,
+ uow,
+ save_parent,
+ delete_parent,
+ child_action,
+ after_save,
+ before_delete,
+ isdelete,
+ childisdelete,
+ ):
+ if not isdelete:
+ if childisdelete:
+ uow.dependencies.update(
+ [(save_parent, after_save), (after_save, child_action)]
+ )
+ else:
+ uow.dependencies.update(
+ [(save_parent, after_save), (child_action, after_save)]
+ )
+ else:
+ uow.dependencies.update(
+ [(before_delete, child_action), (before_delete, delete_parent)]
+ )
+
+ def presort_deletes(self, uowcommit, states):
+ # TODO: no tests fail if this whole
+ # thing is removed !!!!
+ if not self.passive_deletes:
+ # if no passive deletes, load history on
+ # the collection, so that prop_has_changes()
+ # returns True
+ for state in states:
+ uowcommit.get_attribute_history(
+ state, self.key, self._passive_delete_flag
+ )
+
+ def presort_saves(self, uowcommit, states):
+ if not self.passive_updates:
+ # if no passive updates, load history on
+ # each collection where parent has changed PK,
+ # so that prop_has_changes() returns True
+ for state in states:
+ if self._pks_changed(uowcommit, state):
+ history = uowcommit.get_attribute_history(
+ state, self.key, attributes.PASSIVE_OFF
+ )
+
+ if not self.cascade.delete_orphan:
+ return
+
+ # check for child items removed from the collection
+ # if delete_orphan check is turned on.
+ for state in states:
+ history = uowcommit.get_attribute_history(
+ state, self.key, attributes.PASSIVE_NO_INITIALIZE
+ )
+ if history:
+ for child in history.deleted:
+ if self.hasparent(child) is False:
+ uowcommit.register_object(
+ child,
+ isdelete=True,
+ operation="delete",
+ prop=self.prop,
+ )
+ for c, m, st_, dct_ in self.mapper.cascade_iterator(
+ "delete", child
+ ):
+ uowcommit.register_object(st_, isdelete=True)
+
+ def process_deletes(self, uowcommit, states):
+ secondary_delete = []
+ secondary_insert = []
+ secondary_update = []
+
+ processed = self._get_reversed_processed_set(uowcommit)
+ tmp = set()
+ for state in states:
+ # this history should be cached already, as
+ # we loaded it in presort_deletes
+ history = uowcommit.get_attribute_history(
+ state, self.key, self._passive_delete_flag
+ )
+ if history:
+ for child in history.non_added():
+ if child is None or (
+ processed is not None and (state, child) in processed
+ ):
+ continue
+ associationrow = {}
+ if not self._synchronize(
+ state,
+ child,
+ associationrow,
+ False,
+ uowcommit,
+ "delete",
+ ):
+ continue
+ secondary_delete.append(associationrow)
+
+ tmp.update((c, state) for c in history.non_added())
+
+ if processed is not None:
+ processed.update(tmp)
+
+ self._run_crud(
+ uowcommit, secondary_insert, secondary_update, secondary_delete
+ )
+
+ def process_saves(self, uowcommit, states):
+ secondary_delete = []
+ secondary_insert = []
+ secondary_update = []
+
+ processed = self._get_reversed_processed_set(uowcommit)
+ tmp = set()
+
+ for state in states:
+ need_cascade_pks = not self.passive_updates and self._pks_changed(
+ uowcommit, state
+ )
+ if need_cascade_pks:
+ passive = attributes.PASSIVE_OFF
+ else:
+ passive = attributes.PASSIVE_NO_INITIALIZE
+ history = uowcommit.get_attribute_history(state, self.key, passive)
+ if history:
+ for child in history.added:
+ if processed is not None and (state, child) in processed:
+ continue
+ associationrow = {}
+ if not self._synchronize(
+ state, child, associationrow, False, uowcommit, "add"
+ ):
+ continue
+ secondary_insert.append(associationrow)
+ for child in history.deleted:
+ if processed is not None and (state, child) in processed:
+ continue
+ associationrow = {}
+ if not self._synchronize(
+ state,
+ child,
+ associationrow,
+ False,
+ uowcommit,
+ "delete",
+ ):
+ continue
+ secondary_delete.append(associationrow)
+
+ tmp.update((c, state) for c in history.added + history.deleted)
+
+ if need_cascade_pks:
+
+ for child in history.unchanged:
+ associationrow = {}
+ sync.update(
+ state,
+ self.parent,
+ associationrow,
+ "old_",
+ self.prop.synchronize_pairs,
+ )
+ sync.update(
+ child,
+ self.mapper,
+ associationrow,
+ "old_",
+ self.prop.secondary_synchronize_pairs,
+ )
+
+ secondary_update.append(associationrow)
+
+ if processed is not None:
+ processed.update(tmp)
+
+ self._run_crud(
+ uowcommit, secondary_insert, secondary_update, secondary_delete
+ )
+
+ def _run_crud(
+ self, uowcommit, secondary_insert, secondary_update, secondary_delete
+ ):
+ connection = uowcommit.transaction.connection(self.mapper)
+
+ if secondary_delete:
+ associationrow = secondary_delete[0]
+ statement = self.secondary.delete().where(
+ sql.and_(
+ *[
+ c == sql.bindparam(c.key, type_=c.type)
+ for c in self.secondary.c
+ if c.key in associationrow
+ ]
+ )
+ )
+ result = connection.execute(statement, secondary_delete)
+
+ if (
+ result.supports_sane_multi_rowcount()
+ ) and result.rowcount != len(secondary_delete):
+ raise exc.StaleDataError(
+ "DELETE statement on table '%s' expected to delete "
+ "%d row(s); Only %d were matched."
+ % (
+ self.secondary.description,
+ len(secondary_delete),
+ result.rowcount,
+ )
+ )
+
+ if secondary_update:
+ associationrow = secondary_update[0]
+ statement = self.secondary.update().where(
+ sql.and_(
+ *[
+ c == sql.bindparam("old_" + c.key, type_=c.type)
+ for c in self.secondary.c
+ if c.key in associationrow
+ ]
+ )
+ )
+ result = connection.execute(statement, secondary_update)
+
+ if (
+ result.supports_sane_multi_rowcount()
+ ) and result.rowcount != len(secondary_update):
+ raise exc.StaleDataError(
+ "UPDATE statement on table '%s' expected to update "
+ "%d row(s); Only %d were matched."
+ % (
+ self.secondary.description,
+ len(secondary_update),
+ result.rowcount,
+ )
+ )
+
+ if secondary_insert:
+ statement = self.secondary.insert()
+ connection.execute(statement, secondary_insert)
+
+ def _synchronize(
+ self, state, child, associationrow, clearkeys, uowcommit, operation
+ ):
+
+ # this checks for None if uselist=True
+ self._verify_canload(child)
+
+ # but if uselist=False we get here. If child is None,
+ # no association row can be generated, so return.
+ if child is None:
+ return False
+
+ if child is not None and not uowcommit.session._contains_state(child):
+ if not child.deleted:
+ util.warn(
+ "Object of type %s not in session, %s "
+ "operation along '%s' won't proceed"
+ % (mapperutil.state_class_str(child), operation, self.prop)
+ )
+ return False
+
+ sync.populate_dict(
+ state, self.parent, associationrow, self.prop.synchronize_pairs
+ )
+ sync.populate_dict(
+ child,
+ self.mapper,
+ associationrow,
+ self.prop.secondary_synchronize_pairs,
+ )
+
+ return True
+
+ def _pks_changed(self, uowcommit, state):
+ return sync.source_modified(
+ uowcommit, state, self.parent, self.prop.synchronize_pairs
+ )
+
+
+_direction_to_processor = {
+ ONETOMANY: OneToManyDP,
+ MANYTOONE: ManyToOneDP,
+ MANYTOMANY: ManyToManyDP,
+}
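+
+
+# editor's note: DependencyProcessor.from_relationship() dispatches on
+# this table; e.g. a plain relationship("Address") collection on User
+# has direction ONETOMANY and therefore flushes through OneToManyDP.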
diff --git a/lib/sqlalchemy/orm/descriptor_props.py b/lib/sqlalchemy/orm/descriptor_props.py
new file mode 100644
index 0000000..3d7f23b
--- /dev/null
+++ b/lib/sqlalchemy/orm/descriptor_props.py
@@ -0,0 +1,745 @@
+# orm/descriptor_props.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""Descriptor properties are more "auxiliary" properties
+that exist as configurational elements, but don't participate
+as actively in the load/persist ORM loop.
+
+"""
+
+from . import attributes
+from . import util as orm_util
+from .interfaces import MapperProperty
+from .interfaces import PropComparator
+from .util import _none_set
+from .. import event
+from .. import exc as sa_exc
+from .. import schema
+from .. import sql
+from .. import util
+from ..sql import expression
+from ..sql import operators
+
+
+class DescriptorProperty(MapperProperty):
+ """:class:`.MapperProperty` which proxies access to a
+ user-defined descriptor."""
+
+ doc = None
+
+ uses_objects = False
+ _links_to_entity = False
+
+ def instrument_class(self, mapper):
+ prop = self
+
+ class _ProxyImpl(object):
+ accepts_scalar_loader = False
+ load_on_unexpire = True
+ collection = False
+
+ @property
+ def uses_objects(self):
+ return prop.uses_objects
+
+ def __init__(self, key):
+ self.key = key
+
+ if hasattr(prop, "get_history"):
+
+ def get_history(
+ self, state, dict_, passive=attributes.PASSIVE_OFF
+ ):
+ return prop.get_history(state, dict_, passive)
+
+ if self.descriptor is None:
+ desc = getattr(mapper.class_, self.key, None)
+ if mapper._is_userland_descriptor(self.key, desc):
+ self.descriptor = desc
+
+ if self.descriptor is None:
+
+ def fset(obj, value):
+ setattr(obj, self.name, value)
+
+ def fdel(obj):
+ delattr(obj, self.name)
+
+ def fget(obj):
+ return getattr(obj, self.name)
+
+ self.descriptor = property(fget=fget, fset=fset, fdel=fdel)
+
+ proxy_attr = attributes.create_proxied_attribute(self.descriptor)(
+ self.parent.class_,
+ self.key,
+ self.descriptor,
+ lambda: self._comparator_factory(mapper),
+ doc=self.doc,
+ original_property=self,
+ )
+ proxy_attr.impl = _ProxyImpl(self.key)
+ mapper.class_manager.instrument_attribute(self.key, proxy_attr)
+
+
+class CompositeProperty(DescriptorProperty):
+ """Defines a "composite" mapped attribute, representing a collection
+ of columns as one attribute.
+
+ :class:`.CompositeProperty` is constructed using the :func:`.composite`
+ function.
+
+ .. seealso::
+
+ :ref:`mapper_composite`
+
+ """
+
+ def __init__(self, class_, *attrs, **kwargs):
+ r"""Return a composite column-based property for use with a Mapper.
+
+ See the mapping documentation section :ref:`mapper_composite` for a
+ full usage example.
+
+ The :class:`.MapperProperty` returned by :func:`.composite`
+ is the :class:`.CompositeProperty`.
+
+ :param class\_:
+ The "composite type" class, or any classmethod or callable which
+ will produce a new instance of the composite object given the
+ column values in order.
+
+ :param \*cols:
+ List of Column objects to be mapped.
+
+ :param active_history=False:
+ When ``True``, indicates that the "previous" value for a
+ scalar attribute should be loaded when replaced, if not
+ already loaded. See the same flag on :func:`.column_property`.
+
+ :param group:
+ A group name for this property when marked as deferred.
+
+ :param deferred:
+ When True, the column property is "deferred", meaning that it does
+ not load immediately, and is instead loaded when the attribute is
+ first accessed on an instance. See also
+ :func:`~sqlalchemy.orm.deferred`.
+
+ :param comparator_factory: a class which extends
+ :class:`.CompositeProperty.Comparator` which provides custom SQL
+ clause generation for comparison operations.
+
+ :param doc:
+ optional string that will be applied as the doc on the
+ class-bound descriptor.
+
+ :param info: Optional data dictionary which will be populated into the
+ :attr:`.MapperProperty.info` attribute of this object.
+
+ """
+ super(CompositeProperty, self).__init__()
+
+ self.attrs = attrs
+ self.composite_class = class_
+ self.active_history = kwargs.get("active_history", False)
+ self.deferred = kwargs.get("deferred", False)
+ self.group = kwargs.get("group", None)
+ self.comparator_factory = kwargs.pop(
+ "comparator_factory", self.__class__.Comparator
+ )
+ if "info" in kwargs:
+ self.info = kwargs.pop("info")
+
+ util.set_creation_order(self)
+ self._create_descriptor()
+
+ def instrument_class(self, mapper):
+ super(CompositeProperty, self).instrument_class(mapper)
+ self._setup_event_handlers()
+
+ def do_init(self):
+ """Initialization which occurs after the :class:`.CompositeProperty`
+ has been associated with its parent mapper.
+
+ """
+ self._setup_arguments_on_columns()
+
+ _COMPOSITE_FGET = object()
+
+ def _create_descriptor(self):
+ """Create the Python descriptor that will serve as
+ the access point on instances of the mapped class.
+
+ """
+
+ def fget(instance):
+ dict_ = attributes.instance_dict(instance)
+ state = attributes.instance_state(instance)
+
+ if self.key not in dict_:
+ # key not present. Iterate through related
+ # attributes, retrieve their values. This
+ # ensures they all load.
+ values = [
+ getattr(instance, key) for key in self._attribute_keys
+ ]
+
+ # current expected behavior here is that the composite is
+ # created on access if the object is persistent or if
+ # col attributes have non-None values. This would be better
+ # if the composite were created unconditionally,
+ # but that would be a behavioral change.
+ if self.key not in dict_ and (
+ state.key is not None or not _none_set.issuperset(values)
+ ):
+ dict_[self.key] = self.composite_class(*values)
+ state.manager.dispatch.refresh(
+ state, self._COMPOSITE_FGET, [self.key]
+ )
+
+ return dict_.get(self.key, None)
+
+ def fset(instance, value):
+ dict_ = attributes.instance_dict(instance)
+ state = attributes.instance_state(instance)
+ attr = state.manager[self.key]
+ previous = dict_.get(self.key, attributes.NO_VALUE)
+ for fn in attr.dispatch.set:
+ value = fn(state, value, previous, attr.impl)
+ dict_[self.key] = value
+ if value is None:
+ for key in self._attribute_keys:
+ setattr(instance, key, None)
+ else:
+ for key, value in zip(
+ self._attribute_keys, value.__composite_values__()
+ ):
+ setattr(instance, key, value)
+
+ def fdel(instance):
+ state = attributes.instance_state(instance)
+ dict_ = attributes.instance_dict(instance)
+ previous = dict_.pop(self.key, attributes.NO_VALUE)
+ attr = state.manager[self.key]
+ attr.dispatch.remove(state, previous, attr.impl)
+ for key in self._attribute_keys:
+ setattr(instance, key, None)
+
+ self.descriptor = property(fget, fset, fdel)
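+
+ # editor's note: a sketch of the protocol the descriptor above relies
+ # on -- the composite class accepts the column values positionally
+ # and returns them via __composite_values__()::
+ #
+ #     class Point:
+ #         def __init__(self, x, y):
+ #             self.x = x
+ #             self.y = y
+ #
+ #         def __composite_values__(self):
+ #             return self.x, self.y
+ #
+ #     class Vertex(Base):
+ #         __tablename__ = "vertices"
+ #         id = Column(Integer, primary_key=True)
+ #         x1 = Column(Integer)
+ #         y1 = Column(Integer)
+ #         start = composite(Point, x1, y1)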
+
+ @util.memoized_property
+ def _comparable_elements(self):
+ return [getattr(self.parent.class_, prop.key) for prop in self.props]
+
+ @util.memoized_property
+ def props(self):
+ props = []
+ for attr in self.attrs:
+ if isinstance(attr, str):
+ prop = self.parent.get_property(attr, _configure_mappers=False)
+ elif isinstance(attr, schema.Column):
+ prop = self.parent._columntoproperty[attr]
+ elif isinstance(attr, attributes.InstrumentedAttribute):
+ prop = attr.property
+ else:
+ raise sa_exc.ArgumentError(
+ "Composite expects Column objects or mapped "
+ "attributes/attribute names as arguments, got: %r"
+ % (attr,)
+ )
+ props.append(prop)
+ return props
+
+ @property
+ def columns(self):
+ return [a for a in self.attrs if isinstance(a, schema.Column)]
+
+ def _setup_arguments_on_columns(self):
+ """Propagate configuration arguments made on this composite
+ to the target columns, for those that apply.
+
+ """
+ for prop in self.props:
+ prop.active_history = self.active_history
+ if self.deferred:
+ prop.deferred = self.deferred
+ prop.strategy_key = (("deferred", True), ("instrument", True))
+ prop.group = self.group
+
+ def _setup_event_handlers(self):
+ """Establish events that populate/expire the composite attribute."""
+
+ def load_handler(state, context):
+ _load_refresh_handler(state, context, None, is_refresh=False)
+
+ def refresh_handler(state, context, to_load):
+ # note this corresponds to sqlalchemy.ext.mutable load_attrs()
+
+ if not to_load or (
+ {self.key}.union(self._attribute_keys)
+ ).intersection(to_load):
+ _load_refresh_handler(state, context, to_load, is_refresh=True)
+
+ def _load_refresh_handler(state, context, to_load, is_refresh):
+ dict_ = state.dict
+
+ # if context indicates we are coming from the
+ # fget() handler, this already set the value; skip the
+ # handler here. (other handlers like mutablecomposite will still
+ # want to catch it)
+ # there's an insufficiency here in that the fget() handler
+ # really should not be using the refresh event and there should
+ # be some other event that mutablecomposite can subscribe
+ # to for this.
+
+ if (
+ not is_refresh or context is self._COMPOSITE_FGET
+ ) and self.key in dict_:
+ return
+
+ # if column elements aren't loaded, skip.
+ # __get__() will initiate a load for those
+ # columns
+ for k in self._attribute_keys:
+ if k not in dict_:
+ return
+
+ dict_[self.key] = self.composite_class(
+ *[state.dict[key] for key in self._attribute_keys]
+ )
+
+ def expire_handler(state, keys):
+ if keys is None or set(self._attribute_keys).intersection(keys):
+ state.dict.pop(self.key, None)
+
+ def insert_update_handler(mapper, connection, state):
+ """After an insert or update, some columns may be expired due
+ to server side defaults, or re-populated due to client side
+ defaults. Pop out the composite value here so that it
+ recreates.
+
+ """
+
+ state.dict.pop(self.key, None)
+
+ event.listen(
+ self.parent, "after_insert", insert_update_handler, raw=True
+ )
+ event.listen(
+ self.parent, "after_update", insert_update_handler, raw=True
+ )
+ event.listen(
+ self.parent, "load", load_handler, raw=True, propagate=True
+ )
+ event.listen(
+ self.parent, "refresh", refresh_handler, raw=True, propagate=True
+ )
+ event.listen(
+ self.parent, "expire", expire_handler, raw=True, propagate=True
+ )
+
+ # TODO: need a deserialize hook here
+
+ @util.memoized_property
+ def _attribute_keys(self):
+ return [prop.key for prop in self.props]
+
+ def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
+ """Provided for userland code that uses attributes.get_history()."""
+
+ added = []
+ deleted = []
+
+ has_history = False
+ for prop in self.props:
+ key = prop.key
+ hist = state.manager[key].impl.get_history(state, dict_)
+ if hist.has_changes():
+ has_history = True
+
+ non_deleted = hist.non_deleted()
+ if non_deleted:
+ added.extend(non_deleted)
+ else:
+ added.append(None)
+ if hist.deleted:
+ deleted.extend(hist.deleted)
+ else:
+ deleted.append(None)
+
+ if has_history:
+ return attributes.History(
+ [self.composite_class(*added)],
+ (),
+ [self.composite_class(*deleted)],
+ )
+ else:
+ return attributes.History((), [self.composite_class(*added)], ())
+
+ def _comparator_factory(self, mapper):
+ return self.comparator_factory(self, mapper)
+
+ class CompositeBundle(orm_util.Bundle):
+ def __init__(self, property_, expr):
+ self.property = property_
+ super(CompositeProperty.CompositeBundle, self).__init__(
+ property_.key, *expr
+ )
+
+ def create_row_processor(self, query, procs, labels):
+ def proc(row):
+ return self.property.composite_class(
+ *[proc(row) for proc in procs]
+ )
+
+ return proc
+
+ class Comparator(PropComparator):
+ """Produce boolean, comparison, and other operators for
+ :class:`.CompositeProperty` attributes.
+
+ See the example in :ref:`composite_operations` for an overview
+ of usage, as well as the documentation for :class:`.PropComparator`.
+
+ .. seealso::
+
+ :class:`.PropComparator`
+
+ :class:`.ColumnOperators`
+
+ :ref:`types_operators`
+
+ :attr:`.TypeEngine.comparator_factory`
+
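+ A short, illustrative sketch (the ``Vertex`` / ``Point`` composite
+ mapping is assumed here, as in the composite documentation)::
+
+     session.query(Vertex).filter(Vertex.start == Point(3, 4))
+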
+ """
+
+ __hash__ = None
+
+ @util.memoized_property
+ def clauses(self):
+ return expression.ClauseList(
+ group=False, *self._comparable_elements
+ )
+
+ def __clause_element__(self):
+ return self.expression
+
+ @util.memoized_property
+ def expression(self):
+ clauses = self.clauses._annotate(
+ {
+ "parententity": self._parententity,
+ "parentmapper": self._parententity,
+ "proxy_key": self.prop.key,
+ }
+ )
+ return CompositeProperty.CompositeBundle(self.prop, clauses)
+
+ def _bulk_update_tuples(self, value):
+ if isinstance(value, sql.elements.BindParameter):
+ value = value.value
+
+ if value is None:
+ values = [None for key in self.prop._attribute_keys]
+ elif isinstance(value, self.prop.composite_class):
+ values = value.__composite_values__()
+ else:
+ raise sa_exc.ArgumentError(
+ "Can't UPDATE composite attribute %s to %r"
+ % (self.prop, value)
+ )
+
+ return zip(self._comparable_elements, values)
+
+ @util.memoized_property
+ def _comparable_elements(self):
+ if self._adapt_to_entity:
+ return [
+ getattr(self._adapt_to_entity.entity, prop.key)
+ for prop in self.prop._comparable_elements
+ ]
+ else:
+ return self.prop._comparable_elements
+
+ def __eq__(self, other):
+ if other is None:
+ values = [None] * len(self.prop._comparable_elements)
+ else:
+ values = other.__composite_values__()
+ comparisons = [
+ a == b for a, b in zip(self.prop._comparable_elements, values)
+ ]
+ if self._adapt_to_entity:
+ comparisons = [self.adapter(x) for x in comparisons]
+ return sql.and_(*comparisons)
+
+ def __ne__(self, other):
+ return sql.not_(self.__eq__(other))
+
+ def __str__(self):
+ return str(self.parent.class_.__name__) + "." + self.key
+
+
+class ConcreteInheritedProperty(DescriptorProperty):
+ """A 'do nothing' :class:`.MapperProperty` that disables
+ an attribute on a concrete subclass that is only present
+ on the inherited mapper, not the concrete class's mapper.
+
+ Cases where this occurs include:
+
+ * When the superclass mapper is mapped against a
+ "polymorphic union", which includes all attributes from
+ all subclasses.
+ * When a relationship() is configured on an inherited mapper,
+ but not on the subclass mapper. Concrete mappers require
+ that relationship() is configured explicitly on each
+ subclass.
+
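+ Illustratively (``Manager`` stands in for any concrete subclass;
+ the attribute name is an assumption), instance-level access to such
+ an attribute raises ``AttributeError``::
+
+     some_manager.paperwork # raises AttributeError
+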
+ """
+
+ def _comparator_factory(self, mapper):
+ comparator_callable = None
+
+ for m in self.parent.iterate_to_root():
+ p = m._props[self.key]
+ if not isinstance(p, ConcreteInheritedProperty):
+ comparator_callable = p.comparator_factory
+ break
+ return comparator_callable
+
+ def __init__(self):
+ super(ConcreteInheritedProperty, self).__init__()
+
+ def warn():
+ raise AttributeError(
+ "Concrete %s does not implement "
+ "attribute %r at the instance level. Add "
+ "this property explicitly to %s."
+ % (self.parent, self.key, self.parent)
+ )
+
+ class NoninheritedConcreteProp(object):
+ def __set__(s, obj, value):
+ warn()
+
+ def __delete__(s, obj):
+ warn()
+
+ def __get__(s, obj, owner):
+ if obj is None:
+ return self.descriptor
+ warn()
+
+ self.descriptor = NoninheritedConcreteProp()
+
+
+class SynonymProperty(DescriptorProperty):
+ def __init__(
+ self,
+ name,
+ map_column=None,
+ descriptor=None,
+ comparator_factory=None,
+ doc=None,
+ info=None,
+ ):
+ """Denote an attribute name as a synonym to a mapped property,
+ in that the attribute will mirror the value and expression behavior
+ of another attribute.
+
+ e.g.::
+
+ class MyClass(Base):
+ __tablename__ = 'my_table'
+
+ id = Column(Integer, primary_key=True)
+ job_status = Column(String(50))
+
+ status = synonym("job_status")
+
+
+ :param name: the name of the existing mapped property. This
+ can refer to the string name ORM-mapped attribute
+ configured on the class, including column-bound attributes
+ and relationships.
+
+ :param descriptor: a Python :term:`descriptor` that will be used
+ as a getter (and potentially a setter) when this attribute is
+ accessed at the instance level.
+
+ :param map_column: **For classical mappings and mappings against
+ an existing Table object only**. if ``True``, the :func:`.synonym`
+ construct will locate the :class:`_schema.Column`
+ object upon the mapped
+ table that would normally be associated with the attribute name of
+ this synonym, and produce a new :class:`.ColumnProperty` that instead
+ maps this :class:`_schema.Column`
+ to the alternate name given as the "name"
+ argument of the synonym; in this way, the usual step of redefining
+ the mapping of the :class:`_schema.Column`
+ to be under a different name is
+ unnecessary. This is usually intended to be used when a
+ :class:`_schema.Column`
+ is to be replaced with an attribute that also uses a
+ descriptor, that is, in conjunction with the
+ :paramref:`.synonym.descriptor` parameter::
+
+ my_table = Table(
+ "my_table", metadata,
+ Column('id', Integer, primary_key=True),
+ Column('job_status', String(50))
+ )
+
+ class MyClass(object):
+ @property
+ def _job_status_descriptor(self):
+ return "Status: %s" % self._job_status
+
+
+ mapper(
+ MyClass, my_table, properties={
+ "job_status": synonym(
+ "_job_status", map_column=True,
+ descriptor=MyClass._job_status_descriptor)
+ }
+ )
+
+ Above, the attribute named ``_job_status`` is automatically
+ mapped to the ``job_status`` column::
+
+ >>> j1 = MyClass()
+ >>> j1._job_status = "employed"
+ >>> j1.job_status
+ Status: employed
+
+ When using Declarative, in order to provide a descriptor in
+ conjunction with a synonym, use the
+ :func:`sqlalchemy.ext.declarative.synonym_for` helper. However,
+ note that the :ref:`hybrid properties <mapper_hybrids>` feature
+ should usually be preferred, particularly when redefining attribute
+ behavior.
+
+ :param info: Optional data dictionary which will be populated into the
+ :attr:`.InspectionAttr.info` attribute of this object.
+
+ .. versionadded:: 1.0.0
+
+ :param comparator_factory: A subclass of :class:`.PropComparator`
+ that will provide custom comparison behavior at the SQL expression
+ level.
+
+ .. note::
+
+ For the use case of providing an attribute which redefines both
+ Python-level and SQL-expression level behavior of an attribute,
+ please refer to the Hybrid attribute introduced at
+ :ref:`mapper_hybrids` for a more effective technique.
+
+ .. seealso::
+
+ :ref:`synonyms` - Overview of synonyms
+
+ :func:`.synonym_for` - a helper oriented towards Declarative
+
+ :ref:`mapper_hybrids` - The Hybrid Attribute extension provides an
+ updated approach to augmenting attribute behavior more flexibly
+ than can be achieved with synonyms.
+
+ """
+ super(SynonymProperty, self).__init__()
+
+ self.name = name
+ self.map_column = map_column
+ self.descriptor = descriptor
+ self.comparator_factory = comparator_factory
+ self.doc = doc or (descriptor and descriptor.__doc__) or None
+ if info:
+ self.info = info
+
+ util.set_creation_order(self)
+
+ @property
+ def uses_objects(self):
+ return getattr(self.parent.class_, self.name).impl.uses_objects
+
+ # TODO: when initialized, check _proxied_object,
+ # emit a warning if its not a column-based property
+
+ @util.memoized_property
+ def _proxied_object(self):
+ attr = getattr(self.parent.class_, self.name)
+ if not hasattr(attr, "property") or not isinstance(
+ attr.property, MapperProperty
+ ):
+ # attribute is a non-MapperProperty proxy such as
+ # hybrid or association proxy
+ if isinstance(attr, attributes.QueryableAttribute):
+ return attr.comparator
+ elif isinstance(attr, operators.ColumnOperators):
+ return attr
+
+ raise sa_exc.InvalidRequestError(
+ """synonym() attribute "%s.%s" only supports """
+ """ORM mapped attributes, got %r"""
+ % (self.parent.class_.__name__, self.name, attr)
+ )
+ return attr.property
+
+ def _comparator_factory(self, mapper):
+ prop = self._proxied_object
+
+ if isinstance(prop, MapperProperty):
+ if self.comparator_factory:
+ comp = self.comparator_factory(prop, mapper)
+ else:
+ comp = prop.comparator_factory(prop, mapper)
+ return comp
+ else:
+ return prop
+
+ def get_history(self, *arg, **kw):
+ attr = getattr(self.parent.class_, self.name)
+ return attr.impl.get_history(*arg, **kw)
+
+ @util.preload_module("sqlalchemy.orm.properties")
+ def set_parent(self, parent, init):
+ properties = util.preloaded.orm_properties
+
+ if self.map_column:
+ # implement the 'map_column' option.
+ if self.key not in parent.persist_selectable.c:
+ raise sa_exc.ArgumentError(
+ "Can't compile synonym '%s': no column on table "
+ "'%s' named '%s'"
+ % (
+ self.name,
+ parent.persist_selectable.description,
+ self.key,
+ )
+ )
+ elif (
+ parent.persist_selectable.c[self.key]
+ in parent._columntoproperty
+ and parent._columntoproperty[
+ parent.persist_selectable.c[self.key]
+ ].key
+ == self.name
+ ):
+ raise sa_exc.ArgumentError(
+ "Can't call map_column=True for synonym %r=%r, "
+ "a ColumnProperty already exists keyed to the name "
+ "%r for column %r"
+ % (self.key, self.name, self.name, self.key)
+ )
+ p = properties.ColumnProperty(
+ parent.persist_selectable.c[self.key]
+ )
+ parent._configure_property(self.name, p, init=init, setparent=True)
+ p._mapped_by_synonym = self.key
+
+ self.parent = parent
diff --git a/lib/sqlalchemy/orm/dynamic.py b/lib/sqlalchemy/orm/dynamic.py
new file mode 100644
index 0000000..ec62560
--- /dev/null
+++ b/lib/sqlalchemy/orm/dynamic.py
@@ -0,0 +1,491 @@
+# orm/dynamic.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""Dynamic collection API.
+
+Dynamic collections act like Query() objects for read operations and support
+basic add/delete mutation.
+
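+A minimal usage sketch (the ``User`` / ``Address`` mapping and the
+session are illustrative assumptions, not part of this module)::
+
+    class User(Base):
+        __tablename__ = "user"
+
+        id = Column(Integer, primary_key=True)
+        addresses = relationship("Address", lazy="dynamic")
+
+    user = session.query(User).get(5)
+
+    # read side: behaves like a Query; SQL is emitted upon iteration
+    recent = user.addresses.filter(Address.id > 10).all()
+
+    # write side: basic mutation without loading the full collection
+    user.addresses.append(Address())
+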
+"""
+
+from . import attributes
+from . import exc as orm_exc
+from . import interfaces
+from . import object_mapper
+from . import object_session
+from . import relationships
+from . import strategies
+from . import util as orm_util
+from .query import Query
+from .. import exc
+from .. import log
+from .. import util
+from ..engine import result
+
+
+@log.class_logger
+@relationships.RelationshipProperty.strategy_for(lazy="dynamic")
+class DynaLoader(strategies.AbstractRelationshipLoader):
+ def init_class_attribute(self, mapper):
+ self.is_class_level = True
+ if not self.uselist:
+ raise exc.InvalidRequestError(
+ "On relationship %s, 'dynamic' loaders cannot be used with "
+ "many-to-one/one-to-one relationships and/or "
+ "uselist=False." % self.parent_property
+ )
+ elif self.parent_property.direction not in (
+ interfaces.ONETOMANY,
+ interfaces.MANYTOMANY,
+ ):
+ util.warn(
+ "On relationship %s, 'dynamic' loaders cannot be used with "
+ "many-to-one/one-to-one relationships and/or "
+ "uselist=False. This warning will be an exception in a "
+ "future release." % self.parent_property
+ )
+
+ strategies._register_attribute(
+ self.parent_property,
+ mapper,
+ useobject=True,
+ impl_class=DynamicAttributeImpl,
+ target_mapper=self.parent_property.mapper,
+ order_by=self.parent_property.order_by,
+ query_class=self.parent_property.query_class,
+ )
+
+
+class DynamicAttributeImpl(attributes.AttributeImpl):
+ uses_objects = True
+ default_accepts_scalar_loader = False
+ supports_population = False
+ collection = False
+ dynamic = True
+ order_by = ()
+
+ def __init__(
+ self,
+ class_,
+ key,
+ typecallable,
+ dispatch,
+ target_mapper,
+ order_by,
+ query_class=None,
+ **kw
+ ):
+ super(DynamicAttributeImpl, self).__init__(
+ class_, key, typecallable, dispatch, **kw
+ )
+ self.target_mapper = target_mapper
+ if order_by:
+ self.order_by = tuple(order_by)
+ if not query_class:
+ self.query_class = AppenderQuery
+ elif AppenderMixin in query_class.mro():
+ self.query_class = query_class
+ else:
+ self.query_class = mixin_user_query(query_class)
+
+ def get(self, state, dict_, passive=attributes.PASSIVE_OFF):
+ if not passive & attributes.SQL_OK:
+ return self._get_collection_history(
+ state, attributes.PASSIVE_NO_INITIALIZE
+ ).added_items
+ else:
+ return self.query_class(self, state)
+
+ def get_collection(
+ self,
+ state,
+ dict_,
+ user_data=None,
+ passive=attributes.PASSIVE_NO_INITIALIZE,
+ ):
+ if not passive & attributes.SQL_OK:
+ data = self._get_collection_history(state, passive).added_items
+ else:
+ history = self._get_collection_history(state, passive)
+ data = history.added_plus_unchanged
+ return DynamicCollectionAdapter(data)
+
+ @util.memoized_property
+ def _append_token(self):
+ return attributes.Event(self, attributes.OP_APPEND)
+
+ @util.memoized_property
+ def _remove_token(self):
+ return attributes.Event(self, attributes.OP_REMOVE)
+
+ def fire_append_event(
+ self, state, dict_, value, initiator, collection_history=None
+ ):
+ if collection_history is None:
+ collection_history = self._modified_event(state, dict_)
+
+ collection_history.add_added(value)
+
+ for fn in self.dispatch.append:
+ value = fn(state, value, initiator or self._append_token)
+
+ if self.trackparent and value is not None:
+ self.sethasparent(attributes.instance_state(value), state, True)
+
+ def fire_remove_event(
+ self, state, dict_, value, initiator, collection_history=None
+ ):
+ if collection_history is None:
+ collection_history = self._modified_event(state, dict_)
+
+ collection_history.add_removed(value)
+
+ if self.trackparent and value is not None:
+ self.sethasparent(attributes.instance_state(value), state, False)
+
+ for fn in self.dispatch.remove:
+ fn(state, value, initiator or self._remove_token)
+
+ def _modified_event(self, state, dict_):
+
+ if self.key not in state.committed_state:
+ state.committed_state[self.key] = CollectionHistory(self, state)
+
+ state._modified_event(dict_, self, attributes.NEVER_SET)
+
+ # this is a hack to allow the fixtures.ComparableEntity fixture
+ # to work
+ dict_[self.key] = True
+ return state.committed_state[self.key]
+
+ def set(
+ self,
+ state,
+ dict_,
+ value,
+ initiator=None,
+ passive=attributes.PASSIVE_OFF,
+ check_old=None,
+ pop=False,
+ _adapt=True,
+ ):
+ if initiator and initiator.parent_token is self.parent_token:
+ return
+
+ if pop and value is None:
+ return
+
+ iterable = value
+ new_values = list(iterable)
+ if state.has_identity:
+ old_collection = util.IdentitySet(self.get(state, dict_))
+
+ collection_history = self._modified_event(state, dict_)
+ if not state.has_identity:
+ old_collection = collection_history.added_items
+ else:
+ old_collection = old_collection.union(
+ collection_history.added_items
+ )
+
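+ # reconcile the incoming collection against the existing one by
+ # object identity: members present in both are "constants" and are
+ # left alone; new members fire append events, missing members fire
+ # remove events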
+ idset = util.IdentitySet
+ constants = old_collection.intersection(new_values)
+ additions = idset(new_values).difference(constants)
+ removals = old_collection.difference(constants)
+
+ for member in new_values:
+ if member in additions:
+ self.fire_append_event(
+ state,
+ dict_,
+ member,
+ None,
+ collection_history=collection_history,
+ )
+
+ for member in removals:
+ self.fire_remove_event(
+ state,
+ dict_,
+ member,
+ None,
+ collection_history=collection_history,
+ )
+
+ def delete(self, *args, **kwargs):
+ raise NotImplementedError()
+
+ def set_committed_value(self, state, dict_, value):
+ raise NotImplementedError(
+ "Dynamic attributes don't support " "collection population."
+ )
+
+ def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
+ c = self._get_collection_history(state, passive)
+ return c.as_history()
+
+ def get_all_pending(
+ self, state, dict_, passive=attributes.PASSIVE_NO_INITIALIZE
+ ):
+ c = self._get_collection_history(state, passive)
+ return [(attributes.instance_state(x), x) for x in c.all_items]
+
+ def _get_collection_history(self, state, passive=attributes.PASSIVE_OFF):
+ if self.key in state.committed_state:
+ c = state.committed_state[self.key]
+ else:
+ c = CollectionHistory(self, state)
+
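+ # for a persistent parent, wrap the pending changes in a new
+ # CollectionHistory that also loads the unchanged members from the
+ # database, so that as_history() can reconcile the two sets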
+ if state.has_identity and (passive & attributes.INIT_OK):
+ return CollectionHistory(self, state, apply_to=c)
+ else:
+ return c
+
+ def append(
+ self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF
+ ):
+ if initiator is not self:
+ self.fire_append_event(state, dict_, value, initiator)
+
+ def remove(
+ self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF
+ ):
+ if initiator is not self:
+ self.fire_remove_event(state, dict_, value, initiator)
+
+ def pop(
+ self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF
+ ):
+ self.remove(state, dict_, value, initiator, passive=passive)
+
+
+class DynamicCollectionAdapter(object):
+ """simplified CollectionAdapter for internal API consistency"""
+
+ def __init__(self, data):
+ self.data = data
+
+ def __iter__(self):
+ return iter(self.data)
+
+ def _reset_empty(self):
+ pass
+
+ def __len__(self):
+ return len(self.data)
+
+ def __bool__(self):
+ return True
+
+ __nonzero__ = __bool__
+
+
+class AppenderMixin(object):
+ query_class = None
+
+ def __init__(self, attr, state):
+ super(AppenderMixin, self).__init__(attr.target_mapper, None)
+ self.instance = instance = state.obj()
+ self.attr = attr
+
+ mapper = object_mapper(instance)
+ prop = mapper._props[self.attr.key]
+
+ if prop.secondary is not None:
+ # this is a hack right now. The Query only knows how to
+ # make subsequent joins() without a given left-hand side
+ # from self._from_obj[0]. We need to ensure prop.secondary
+ # is in the FROM. So we purposely put the mapper selectable
+ # in _from_obj[0] to ensure a user-defined join() later on
+ # doesn't fail, and secondary is then in _from_obj[1].
+
+ # note also, we are using the official ORM-annotated selectable
+ # from __clause_element__(), see #7868
+ self._from_obj = (prop.mapper.__clause_element__(), prop.secondary)
+
+ self._where_criteria = (
+ prop._with_parent(instance, alias_secondary=False),
+ )
+
+ if self.attr.order_by:
+ self._order_by_clauses = self.attr.order_by
+
+ def session(self):
+ sess = object_session(self.instance)
+ if (
+ sess is not None
+ and self.autoflush
+ and sess.autoflush
+ and self.instance in sess
+ ):
+ sess.flush()
+ if not orm_util.has_identity(self.instance):
+ return None
+ else:
+ return sess
+
+ session = property(session, lambda s, x: None)
+
+ def _iter(self):
+ sess = self.session
+ if sess is None:
+ state = attributes.instance_state(self.instance)
+ if state.detached:
+ util.warn(
+ "Instance %s is detached, dynamic relationship cannot "
+ "return a correct result. This warning will become "
+ "a DetachedInstanceError in a future release."
+ % (orm_util.state_str(state))
+ )
+
+ return result.IteratorResult(
+ result.SimpleResultMetaData([self.attr.class_.__name__]),
+ self.attr._get_collection_history(
+ attributes.instance_state(self.instance),
+ attributes.PASSIVE_NO_INITIALIZE,
+ ).added_items,
+ _source_supports_scalars=True,
+ ).scalars()
+ else:
+ return self._generate(sess)._iter()
+
+ def __getitem__(self, index):
+ sess = self.session
+ if sess is None:
+ return self.attr._get_collection_history(
+ attributes.instance_state(self.instance),
+ attributes.PASSIVE_NO_INITIALIZE,
+ ).indexed(index)
+ else:
+ return self._generate(sess).__getitem__(index)
+
+ def count(self):
+ sess = self.session
+ if sess is None:
+ return len(
+ self.attr._get_collection_history(
+ attributes.instance_state(self.instance),
+ attributes.PASSIVE_NO_INITIALIZE,
+ ).added_items
+ )
+ else:
+ return self._generate(sess).count()
+
+ def _generate(self, sess=None):
+ # note we're returning an entirely new Query class instance
+ # here without any assignment capabilities; the class of this
+ # query is determined by the session.
+ instance = self.instance
+ if sess is None:
+ sess = object_session(instance)
+ if sess is None:
+ raise orm_exc.DetachedInstanceError(
+ "Parent instance %s is not bound to a Session, and no "
+ "contextual session is established; lazy load operation "
+ "of attribute '%s' cannot proceed"
+ % (orm_util.instance_str(instance), self.attr.key)
+ )
+
+ if self.query_class:
+ query = self.query_class(self.attr.target_mapper, session=sess)
+ else:
+ query = sess.query(self.attr.target_mapper)
+
+ query._where_criteria = self._where_criteria
+ query._from_obj = self._from_obj
+ query._order_by_clauses = self._order_by_clauses
+
+ return query
+
+ def extend(self, iterator):
+ for item in iterator:
+ self.attr.append(
+ attributes.instance_state(self.instance),
+ attributes.instance_dict(self.instance),
+ item,
+ None,
+ )
+
+ def append(self, item):
+ self.attr.append(
+ attributes.instance_state(self.instance),
+ attributes.instance_dict(self.instance),
+ item,
+ None,
+ )
+
+ def remove(self, item):
+ self.attr.remove(
+ attributes.instance_state(self.instance),
+ attributes.instance_dict(self.instance),
+ item,
+ None,
+ )
+
+
+class AppenderQuery(AppenderMixin, Query):
+ """A dynamic query that supports basic collection storage operations."""
+
+
+def mixin_user_query(cls):
+ """Return a new class with AppenderQuery functionality layered over."""
+ name = "Appender" + cls.__name__
+ return type(name, (AppenderMixin, cls), {"query_class": cls})
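+
+# A hedged usage note: a user-defined Query subclass typically arrives
+# here via relationship(..., lazy="dynamic", query_class=MyQuery),
+# where ``MyQuery`` is an illustrative name; the generated
+# "AppenderMyQuery" type layers the collection-style append() /
+# remove() / extend() methods over the custom query methods.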
+
+
+class CollectionHistory(object):
+ """Overrides AttributeHistory to receive append/remove events directly."""
+
+ def __init__(self, attr, state, apply_to=None):
+ if apply_to:
+ coll = AppenderQuery(attr, state).autoflush(False)
+ self.unchanged_items = util.OrderedIdentitySet(coll)
+ self.added_items = apply_to.added_items
+ self.deleted_items = apply_to.deleted_items
+ self._reconcile_collection = True
+ else:
+ self.deleted_items = util.OrderedIdentitySet()
+ self.added_items = util.OrderedIdentitySet()
+ self.unchanged_items = util.OrderedIdentitySet()
+ self._reconcile_collection = False
+
+ @property
+ def added_plus_unchanged(self):
+ return list(self.added_items.union(self.unchanged_items))
+
+ @property
+ def all_items(self):
+ return list(
+ self.added_items.union(self.unchanged_items).union(
+ self.deleted_items
+ )
+ )
+
+ def as_history(self):
+ if self._reconcile_collection:
+ added = self.added_items.difference(self.unchanged_items)
+ deleted = self.deleted_items.intersection(self.unchanged_items)
+ unchanged = self.unchanged_items.difference(deleted)
+ else:
+ added, unchanged, deleted = (
+ self.added_items,
+ self.unchanged_items,
+ self.deleted_items,
+ )
+ return attributes.History(list(added), list(unchanged), list(deleted))
+
+ def indexed(self, index):
+ return list(self.added_items)[index]
+
+ def add_added(self, value):
+ self.added_items.add(value)
+
+ def add_removed(self, value):
+ if value in self.added_items:
+ self.added_items.remove(value)
+ else:
+ self.deleted_items.add(value)
diff --git a/lib/sqlalchemy/orm/evaluator.py b/lib/sqlalchemy/orm/evaluator.py
new file mode 100644
index 0000000..dbbfba0
--- /dev/null
+++ b/lib/sqlalchemy/orm/evaluator.py
@@ -0,0 +1,241 @@
+# orm/evaluator.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+import operator
+
+from .. import inspect
+from .. import util
+from ..sql import and_
+from ..sql import operators
+
+
+class UnevaluatableError(Exception):
+ pass
+
+
+class _NoObject(operators.ColumnOperators):
+ def operate(self, *arg, **kw):
+ return None
+
+ def reverse_operate(self, *arg, **kw):
+ return None
+
+
+_NO_OBJECT = _NoObject()
+
+_straight_ops = set(
+ getattr(operators, op)
+ for op in (
+ "add",
+ "mul",
+ "sub",
+ "div",
+ "mod",
+ "truediv",
+ "lt",
+ "le",
+ "ne",
+ "gt",
+ "ge",
+ "eq",
+ )
+)
+
+_extended_ops = {
+ operators.in_op: (lambda a, b: a in b if a is not _NO_OBJECT else None),
+ operators.not_in_op: (
+ lambda a, b: a not in b if a is not _NO_OBJECT else None
+ ),
+}
+
+_notimplemented_ops = set(
+ getattr(operators, op)
+ for op in (
+ "like_op",
+ "not_like_op",
+ "ilike_op",
+ "not_ilike_op",
+ "startswith_op",
+ "between_op",
+ "endswith_op",
+ "concat_op",
+ )
+)
+
+
+class EvaluatorCompiler(object):
+ def __init__(self, target_cls=None):
+ self.target_cls = target_cls
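+
+ # Illustrative sketch of how this compiler is used (``User`` and its
+ # mapped attributes are assumptions): process() turns a SQL criterion
+ # into a plain Python callable which the ORM applies to in-memory
+ # instances, e.g. for the "evaluate" synchronize_session strategy of
+ # bulk UPDATE / DELETE:
+ #
+ # compiler = EvaluatorCompiler(User)
+ # is_match = compiler.process(User.name == "ed")
+ # is_match(some_user) # True, False, or None (SQL NULL)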
+
+ def process(self, *clauses):
+ if len(clauses) > 1:
+ clause = and_(*clauses)
+ elif clauses:
+ clause = clauses[0]
+ else:
+ raise UnevaluatableError("no clause to evaluate")
+
+ meth = getattr(self, "visit_%s" % clause.__visit_name__, None)
+ if not meth:
+ raise UnevaluatableError(
+ "Cannot evaluate %s" % type(clause).__name__
+ )
+ return meth(clause)
+
+ def visit_grouping(self, clause):
+ return self.process(clause.element)
+
+ def visit_null(self, clause):
+ return lambda obj: None
+
+ def visit_false(self, clause):
+ return lambda obj: False
+
+ def visit_true(self, clause):
+ return lambda obj: True
+
+ def visit_column(self, clause):
+ if "parentmapper" in clause._annotations:
+ parentmapper = clause._annotations["parentmapper"]
+ if self.target_cls and not issubclass(
+ self.target_cls, parentmapper.class_
+ ):
+ raise UnevaluatableError(
+ "Can't evaluate criteria against alternate class %s"
+ % parentmapper.class_
+ )
+ key = parentmapper._columntoproperty[clause].key
+ else:
+ key = clause.key
+ if (
+ self.target_cls
+ and key in inspect(self.target_cls).column_attrs
+ ):
+ util.warn(
+ "Evaluating non-mapped column expression '%s' onto "
+ "ORM instances; this is a deprecated use case. Please "
+ "make use of the actual mapped columns in ORM-evaluated "
+ "UPDATE / DELETE expressions." % clause
+ )
+ else:
+ raise UnevaluatableError("Cannot evaluate column: %s" % clause)
+
+ get_corresponding_attr = operator.attrgetter(key)
+ return (
+ lambda obj: get_corresponding_attr(obj)
+ if obj is not None
+ else _NO_OBJECT
+ )
+
+ def visit_tuple(self, clause):
+ return self.visit_clauselist(clause)
+
+ def visit_clauselist(self, clause):
+ evaluators = list(map(self.process, clause.clauses))
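+
+ # None stands in for SQL NULL in the evaluators below: OR returns
+ # None when no operand is true but at least one was None; AND stops
+ # at the first falsy operand, returning None if that operand was
+ # None (or a missing object), else False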
+ if clause.operator is operators.or_:
+
+ def evaluate(obj):
+ has_null = False
+ for sub_evaluate in evaluators:
+ value = sub_evaluate(obj)
+ if value:
+ return True
+ has_null = has_null or value is None
+ if has_null:
+ return None
+ return False
+
+ elif clause.operator is operators.and_:
+
+ def evaluate(obj):
+ for sub_evaluate in evaluators:
+ value = sub_evaluate(obj)
+ if not value:
+ if value is None or value is _NO_OBJECT:
+ return None
+ return False
+ return True
+
+ elif clause.operator is operators.comma_op:
+
+ def evaluate(obj):
+ values = []
+ for sub_evaluate in evaluators:
+ value = sub_evaluate(obj)
+ if value is None or value is _NO_OBJECT:
+ return None
+ values.append(value)
+ return tuple(values)
+
+ else:
+ raise UnevaluatableError(
+ "Cannot evaluate clauselist with operator %s" % clause.operator
+ )
+
+ return evaluate
+
+ def visit_binary(self, clause):
+ eval_left, eval_right = list(
+ map(self.process, [clause.left, clause.right])
+ )
+ operator = clause.operator
+ if operator is operators.is_:
+
+ def evaluate(obj):
+ return eval_left(obj) == eval_right(obj)
+
+ elif operator is operators.is_not:
+
+ def evaluate(obj):
+ return eval_left(obj) != eval_right(obj)
+
+ elif operator in _extended_ops:
+
+ def evaluate(obj):
+ left_val = eval_left(obj)
+ right_val = eval_right(obj)
+ if left_val is None or right_val is None:
+ return None
+
+ return _extended_ops[operator](left_val, right_val)
+
+ elif operator in _straight_ops:
+
+ def evaluate(obj):
+ left_val = eval_left(obj)
+ right_val = eval_right(obj)
+ if left_val is None or right_val is None:
+ return None
+ return operator(left_val, right_val)
+
+ else:
+ raise UnevaluatableError(
+ "Cannot evaluate %s with operator %s"
+ % (type(clause).__name__, clause.operator)
+ )
+ return evaluate
+
+ def visit_unary(self, clause):
+ eval_inner = self.process(clause.element)
+ if clause.operator is operators.inv:
+
+ def evaluate(obj):
+ value = eval_inner(obj)
+ if value is None:
+ return None
+ return not value
+
+ return evaluate
+ raise UnevaluatableError(
+ "Cannot evaluate %s with operator %s"
+ % (type(clause).__name__, clause.operator)
+ )
+
+ def visit_bindparam(self, clause):
+ if clause.callable:
+ val = clause.callable()
+ else:
+ val = clause.value
+ return lambda obj: val
diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py
new file mode 100644
index 0000000..39659c7
--- /dev/null
+++ b/lib/sqlalchemy/orm/events.py
@@ -0,0 +1,2876 @@
+# orm/events.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""ORM event interfaces.
+
+"""
+import weakref
+
+from . import instrumentation
+from . import interfaces
+from . import mapperlib
+from .attributes import QueryableAttribute
+from .base import _mapper_or_none
+from .query import Query
+from .scoping import scoped_session
+from .session import Session
+from .session import sessionmaker
+from .. import event
+from .. import exc
+from .. import util
+from ..util.compat import inspect_getfullargspec
+
+
+class InstrumentationEvents(event.Events):
+ """Events related to class instrumentation events.
+
+ The listeners here support being established against
+ any new style class, that is any object that is a subclass
+ of 'type'. Events will then be fired off for events
+ against that class. If the "propagate=True" flag is passed
+ to event.listen(), the event will fire off for subclasses
+ of that class as well.
+
+ The Python ``type`` builtin is also accepted as a target,
+ which when used has the effect of events being emitted
+ for all classes.
+
+ Note the "propagate" flag here is defaulted to ``True``,
+ unlike the other class level events where it defaults
+ to ``False``. This means that new subclasses will also
+ be the subject of these events, when a listener
+ is established on a superclass.
+
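+ A minimal sketch of establishing a listener (``SomeBaseClass`` is
+ an illustrative placeholder for any class)::
+
+     from sqlalchemy import event
+
+     @event.listens_for(SomeBaseClass, "class_instrument")
+     def on_class_instrument(cls):
+         print("instrumented: %r" % cls)
+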
+ """
+
+ _target_class_doc = "SomeBaseClass"
+ _dispatch_target = instrumentation.InstrumentationFactory
+
+ @classmethod
+ def _accept_with(cls, target):
+ if isinstance(target, type):
+ return _InstrumentationEventsHold(target)
+ else:
+ return None
+
+ @classmethod
+ def _listen(cls, event_key, propagate=True, **kw):
+ target, identifier, fn = (
+ event_key.dispatch_target,
+ event_key.identifier,
+ event_key._listen_fn,
+ )
+
+ def listen(target_cls, *arg):
+ listen_cls = target()
+
+ # bail if the weakref has been collected; this is not something
+ # that normally happens. it was occurring during test teardown
+ # between mapper/registry/instrumentation_manager, however that
+ # interaction was changed to not rely upon the event system.
+ if listen_cls is None:
+ return None
+
+ if propagate and issubclass(target_cls, listen_cls):
+ return fn(target_cls, *arg)
+ elif not propagate and target_cls is listen_cls:
+ return fn(target_cls, *arg)
+
+ def remove(ref):
+ key = event.registry._EventKey(
+ None,
+ identifier,
+ listen,
+ instrumentation._instrumentation_factory,
+ )
+ getattr(
+ instrumentation._instrumentation_factory.dispatch, identifier
+ ).remove(key)
+
+ target = weakref.ref(target.class_, remove)
+
+ event_key.with_dispatch_target(
+ instrumentation._instrumentation_factory
+ ).with_wrapper(listen).base_listen(**kw)
+
+ @classmethod
+ def _clear(cls):
+ super(InstrumentationEvents, cls)._clear()
+ instrumentation._instrumentation_factory.dispatch._clear()
+
+ def class_instrument(self, cls):
+ """Called after the given class is instrumented.
+
+ To get at the :class:`.ClassManager`, use
+ :func:`.manager_of_class`.
+
+ """
+
+ def class_uninstrument(self, cls):
+ """Called before the given class is uninstrumented.
+
+ To get at the :class:`.ClassManager`, use
+ :func:`.manager_of_class`.
+
+ """
+
+ def attribute_instrument(self, cls, key, inst):
+ """Called when an attribute is instrumented."""
+
+
+class _InstrumentationEventsHold(object):
+ """temporary marker object used to transfer from _accept_with() to
+ _listen() on the InstrumentationEvents class.
+
+ """
+
+ def __init__(self, class_):
+ self.class_ = class_
+
+ dispatch = event.dispatcher(InstrumentationEvents)
+
+
+class InstanceEvents(event.Events):
+ """Define events specific to object lifecycle.
+
+ e.g.::
+
+ from sqlalchemy import event
+
+ def my_load_listener(target, context):
+ print("on load!")
+
+ event.listen(SomeClass, 'load', my_load_listener)
+
+ Available targets include:
+
+ * mapped classes
+ * unmapped superclasses of mapped or to-be-mapped classes
+ (using the ``propagate=True`` flag)
+ * :class:`_orm.Mapper` objects
+ * the :class:`_orm.Mapper` class itself and the :func:`.mapper`
+ function indicate listening for all mappers.
+
+ Instance events are closely related to mapper events, but
+ are more specific to the instance and its instrumentation,
+ rather than its system of persistence.
+
+ When using :class:`.InstanceEvents`, several modifiers are
+ available to the :func:`.event.listen` function.
+
+ :param propagate=False: When True, the event listener should
+ be applied to all inheriting classes as well as the
+ class which is the target of this listener.
+ :param raw=False: When True, the "target" argument passed
+ to applicable event listener functions will be the
+ instance's :class:`.InstanceState` management
+ object, rather than the mapped instance itself.
+ :param restore_load_context=False: Applies to the
+ :meth:`.InstanceEvents.load` and :meth:`.InstanceEvents.refresh`
+ events. Restores the loader context of the object when the event
+ hook is complete, so that ongoing eager load operations continue
+ to target the object appropriately. A warning is emitted if the
+ object is moved to a new loader context from within one of these
+ events if this flag is not set.
+
+ .. versionadded:: 1.3.14
+
+
+ """
+
+ _target_class_doc = "SomeClass"
+
+ _dispatch_target = instrumentation.ClassManager
+
+ @classmethod
+ def _new_classmanager_instance(cls, class_, classmanager):
+ _InstanceEventsHold.populate(class_, classmanager)
+
+ @classmethod
+ @util.preload_module("sqlalchemy.orm")
+ def _accept_with(cls, target):
+ orm = util.preloaded.orm
+
+ if isinstance(target, instrumentation.ClassManager):
+ return target
+ elif isinstance(target, mapperlib.Mapper):
+ return target.class_manager
+ elif target is orm.mapper:
+ return instrumentation.ClassManager
+ elif isinstance(target, type):
+ if issubclass(target, mapperlib.Mapper):
+ return instrumentation.ClassManager
+ else:
+ manager = instrumentation.manager_of_class(target)
+ if manager:
+ return manager
+ else:
+ return _InstanceEventsHold(target)
+ return None
+
+ @classmethod
+ def _listen(
+ cls,
+ event_key,
+ raw=False,
+ propagate=False,
+ restore_load_context=False,
+ **kw
+ ):
+ target, fn = (event_key.dispatch_target, event_key._listen_fn)
+
+ if not raw or restore_load_context:
+
+ def wrap(state, *arg, **kw):
+ if not raw:
+ target = state.obj()
+ else:
+ target = state
+ if restore_load_context:
+ runid = state.runid
+ try:
+ return fn(target, *arg, **kw)
+ finally:
+ if restore_load_context:
+ state.runid = runid
+
+ event_key = event_key.with_wrapper(wrap)
+
+ event_key.base_listen(propagate=propagate, **kw)
+
+ if propagate:
+ for mgr in target.subclass_managers(True):
+ event_key.with_dispatch_target(mgr).base_listen(propagate=True)
+
+ @classmethod
+ def _clear(cls):
+ super(InstanceEvents, cls)._clear()
+ _InstanceEventsHold._clear()
+
+ def first_init(self, manager, cls):
+ """Called when the first instance of a particular mapping is called.
+
+ This event is called when the ``__init__`` method of a class
+ is called the first time for that particular class. The event
+ invokes before ``__init__`` actually proceeds as well as before
+ the :meth:`.InstanceEvents.init` event is invoked.
+
+ """
+
+ def init(self, target, args, kwargs):
+ """Receive an instance when its constructor is called.
+
+ This method is only called during a userland construction of
+ an object, in conjunction with the object's constructor, e.g.
+ its ``__init__`` method. It is not called when an object is
+ loaded from the database; see the :meth:`.InstanceEvents.load`
+ event in order to intercept a database load.
+
+ The event is called before the actual ``__init__`` constructor
+ of the object is called. The ``kwargs`` dictionary may be
+ modified in-place in order to affect what is passed to
+ ``__init__``.
+
+ :param target: the mapped instance. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :param args: positional arguments passed to the ``__init__`` method.
+ This is passed as a tuple and is currently immutable.
+ :param kwargs: keyword arguments passed to the ``__init__`` method.
+ This structure *can* be altered in place.
+
+ .. seealso::
+
+ :meth:`.InstanceEvents.init_failure`
+
+ :meth:`.InstanceEvents.load`
+
+ """
+
+ def init_failure(self, target, args, kwargs):
+ """Receive an instance when its constructor has been called,
+ and raised an exception.
+
+ This method is only called during a userland construction of
+ an object, in conjunction with the object's constructor, e.g.
+ its ``__init__`` method. It is not called when an object is loaded
+ from the database.
+
+ The event is invoked after an exception raised by the ``__init__``
+ method is caught. After the event
+ is invoked, the original exception is re-raised outwards, so that
+ the construction of the object still raises an exception. The
+ actual exception and stack trace raised should be present in
+ ``sys.exc_info()``.
+
+ :param target: the mapped instance. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :param args: positional arguments that were passed to the ``__init__``
+ method.
+ :param kwargs: keyword arguments that were passed to the ``__init__``
+ method.
+
+ .. seealso::
+
+ :meth:`.InstanceEvents.init`
+
+ :meth:`.InstanceEvents.load`
+
+ """
+
+ def load(self, target, context):
+ """Receive an object instance after it has been created via
+ ``__new__``, and after initial attribute population has
+ occurred.
+
+ This typically occurs when the instance is created based on
+ incoming result rows, and is only called once for that
+ instance's lifetime.
+
+ .. warning::
+
+ During a result-row load, this event is invoked when the
+ first row received for this instance is processed. When using
+ eager loading with collection-oriented attributes, the additional
+ rows that are to be loaded / processed in order to load subsequent
+ collection items have not occurred yet. This has the effect
+ both that collections will not be fully loaded, as well as that
+ if an operation occurs within this event handler that emits
+ another database load operation for the object, the "loading
+ context" for the object can change and interfere with the
+ existing eager loaders still in progress.
+
+ Examples of what can cause the "loading context" to change within
+ the event handler include, but are not necessarily limited to:
+
+ * accessing deferred attributes that weren't part of the row,
+ will trigger an "undefer" operation and refresh the object
+
+ * accessing attributes on a joined-inheritance subclass that
+ weren't part of the row, will trigger a refresh operation.
+
+ As of SQLAlchemy 1.3.14, a warning is emitted when this occurs. The
+ :paramref:`.InstanceEvents.restore_load_context` option may be
+ used on the event to prevent this warning; this will ensure that
+ the existing loading context is maintained for the object after the
+ event is called::
+
+ @event.listens_for(
+ SomeClass, "load", restore_load_context=True)
+ def on_load(instance, context):
+ instance.some_unloaded_attribute
+
+ .. versionchanged:: 1.3.14 Added
+ :paramref:`.InstanceEvents.restore_load_context`
+ and :paramref:`.SessionEvents.restore_load_context` flags which
+ apply to "on load" events, which will ensure that the loading
+ context for an object is restored when the event hook is
+ complete; a warning is emitted if the load context of the object
+ changes without this flag being set.
+
+
+ The :meth:`.InstanceEvents.load` event is also available in a
+ class-method decorator format called :func:`_orm.reconstructor`.
+
+ :param target: the mapped instance. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :param context: the :class:`.QueryContext` corresponding to the
+ current :class:`_query.Query` in progress. This argument may be
+ ``None`` if the load does not correspond to a :class:`_query.Query`,
+ such as during :meth:`.Session.merge`.
+
+ .. seealso::
+
+ :meth:`.InstanceEvents.init`
+
+ :meth:`.InstanceEvents.refresh`
+
+ :meth:`.SessionEvents.loaded_as_persistent`
+
+ :ref:`mapping_constructors`
+
+ """
+
+ def refresh(self, target, context, attrs):
+ """Receive an object instance after one or more attributes have
+ been refreshed from a query.
+
+ Contrast this to the :meth:`.InstanceEvents.load` method, which
+ is invoked when the object is first loaded from a query.
+
+ .. note:: This event is invoked within the loader process before
+ eager loaders may have been completed, and the object's state may
+ not be complete. Additionally, invoking row-level refresh
+ operations on the object will place the object into a new loader
+ context, interfering with the existing load context. See the note
+ on :meth:`.InstanceEvents.load` for background on making use of the
+ :paramref:`.InstanceEvents.restore_load_context` parameter, in
+ order to resolve this scenario.
+
+ :param target: the mapped instance. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :param context: the :class:`.QueryContext` corresponding to the
+ current :class:`_query.Query` in progress.
+ :param attrs: sequence of attribute names which
+ were populated, or None if all column-mapped, non-deferred
+ attributes were populated.
+
+ .. seealso::
+
+ :meth:`.InstanceEvents.load`
+
+ """
+
+ def refresh_flush(self, target, flush_context, attrs):
+ """Receive an object instance after one or more attributes that
+ contain a column-level default or onupdate handler have been refreshed
+ during persistence of the object's state.
+
+ This event is the same as :meth:`.InstanceEvents.refresh` except
+ it is invoked within the unit of work flush process, and includes
+ only non-primary-key columns that have column level default or
+ onupdate handlers, including Python callables as well as server side
+ defaults and triggers which may be fetched via the RETURNING clause.
+
+ .. note::
+
+ While the :meth:`.InstanceEvents.refresh_flush` event is triggered
+ for an object that was INSERTed as well as for an object that was
+ UPDATEd, the event is geared primarily towards the UPDATE process;
+ it is mostly an internal artifact that INSERT actions can also
+ trigger this event, and note that **primary key columns for an
+ INSERTed row are explicitly omitted** from this event. In order to
+ intercept the newly INSERTed state of an object, the
+ :meth:`.SessionEvents.pending_to_persistent` and
+ :meth:`.MapperEvents.after_insert` are better choices.
+
+ .. versionadded:: 1.0.5
+
+ :param target: the mapped instance. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :param flush_context: Internal :class:`.UOWTransaction` object
+ which handles the details of the flush.
+ :param attrs: sequence of attribute names which
+ were populated.
+
+ .. seealso::
+
+ :ref:`orm_server_defaults`
+
+ :ref:`metadata_defaults_toplevel`
+
+ """
+
+ def expire(self, target, attrs):
+ """Receive an object instance after its attributes or some subset
+ have been expired.
+
+ ``attrs`` is a sequence of attribute names. If None, the entire
+ state was expired.
+
+ :param target: the mapped instance. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :param attrs: sequence of attribute
+ names which were expired, or None if all attributes were
+ expired.
+
+ """
+
+ def pickle(self, target, state_dict):
+ """Receive an object instance when its associated state is
+ being pickled.
+
+ :param target: the mapped instance. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :param state_dict: the dictionary returned by
+ :class:`.InstanceState.__getstate__`, containing the state
+ to be pickled.
+
+ """
+
+ def unpickle(self, target, state_dict):
+ """Receive an object instance after its associated state has
+ been unpickled.
+
+ :param target: the mapped instance. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :param state_dict: the dictionary sent to
+ :class:`.InstanceState.__setstate__`, containing the state
+ dictionary which was pickled.
+
+ """
+
+
+class _EventsHold(event.RefCollection):
+ """Hold onto listeners against unmapped, uninstrumented classes.
+
+ Establish _listen() for that class' mapper/instrumentation when
+ those objects are created for that class.
+
+ """
+
+ def __init__(self, class_):
+ self.class_ = class_
+
+ @classmethod
+ def _clear(cls):
+ cls.all_holds.clear()
+
+ class HoldEvents(object):
+ _dispatch_target = None
+
+ @classmethod
+ def _listen(
+ cls, event_key, raw=False, propagate=False, retval=False, **kw
+ ):
+ target = event_key.dispatch_target
+
+ if target.class_ in target.all_holds:
+ collection = target.all_holds[target.class_]
+ else:
+ collection = target.all_holds[target.class_] = {}
+
+ event.registry._stored_in_collection(event_key, target)
+ collection[event_key._key] = (
+ event_key,
+ raw,
+ propagate,
+ retval,
+ kw,
+ )
+
+ if propagate:
+ stack = list(target.class_.__subclasses__())
+ while stack:
+ subclass = stack.pop(0)
+ stack.extend(subclass.__subclasses__())
+ subject = target.resolve(subclass)
+ if subject is not None:
+ # we are already going through __subclasses__()
+ # so leave generic propagate flag False
+ event_key.with_dispatch_target(subject).listen(
+ raw=raw, propagate=False, retval=retval, **kw
+ )
+
+ def remove(self, event_key):
+ target = event_key.dispatch_target
+
+ if isinstance(target, _EventsHold):
+ collection = target.all_holds[target.class_]
+ del collection[event_key._key]
+
+ @classmethod
+ def populate(cls, class_, subject):
+ for subclass in class_.__mro__:
+ if subclass in cls.all_holds:
+ collection = cls.all_holds[subclass]
+ for (
+ event_key,
+ raw,
+ propagate,
+ retval,
+ kw,
+ ) in collection.values():
+ if propagate or subclass is class_:
+ # since we can't be sure in what order different
+ # classes in a hierarchy are triggered with
+ # populate(), we rely upon _EventsHold for all event
+ # assignment, instead of using the generic propagate
+ # flag.
+ event_key.with_dispatch_target(subject).listen(
+ raw=raw, propagate=False, retval=retval, **kw
+ )
+
+
+class _InstanceEventsHold(_EventsHold):
+ all_holds = weakref.WeakKeyDictionary()
+
+ def resolve(self, class_):
+ return instrumentation.manager_of_class(class_)
+
+ class HoldInstanceEvents(_EventsHold.HoldEvents, InstanceEvents):
+ pass
+
+ dispatch = event.dispatcher(HoldInstanceEvents)
+
+
+class MapperEvents(event.Events):
+ """Define events specific to mappings.
+
+ e.g.::
+
+ from sqlalchemy import event
+
+ def my_before_insert_listener(mapper, connection, target):
+ # execute a stored procedure upon INSERT,
+ # apply the value to the row to be inserted
+ target.calculated_value = connection.execute(
+ text("select my_special_function(%d)" % target.special_number)
+ ).scalar()
+
+ # associate the listener function with SomeClass,
+ # to execute during the "before_insert" hook
+ event.listen(
+ SomeClass, 'before_insert', my_before_insert_listener)
+
+ Available targets include:
+
+ * mapped classes
+ * unmapped superclasses of mapped or to-be-mapped classes
+ (using the ``propagate=True`` flag)
+ * :class:`_orm.Mapper` objects
+ * the :class:`_orm.Mapper` class itself and the :func:`.mapper`
+ function indicate listening for all mappers.
+
+ Mapper events provide hooks into critical sections of the
+ mapper, including those related to object instrumentation,
+ object loading, and object persistence. In particular, the
+ persistence methods :meth:`~.MapperEvents.before_insert`,
+ and :meth:`~.MapperEvents.before_update` are popular
+ places to augment the state being persisted - however, these
+ methods operate with several significant restrictions. The
+ user is encouraged to evaluate the
+ :meth:`.SessionEvents.before_flush` and
+ :meth:`.SessionEvents.after_flush` methods as more
+ flexible and user-friendly hooks in which to apply
+ additional database state during a flush.
+
+ When using :class:`.MapperEvents`, several modifiers are
+ available to the :func:`.event.listen` function.
+
+ :param propagate=False: When True, the event listener should
+ be applied to all inheriting mappers and/or the mappers of
+ inheriting classes, as well as any
+ mapper which is the target of this listener.
+ :param raw=False: When True, the "target" argument passed
+ to applicable event listener functions will be the
+ instance's :class:`.InstanceState` management
+ object, rather than the mapped instance itself.
+ :param retval=False: when True, the user-defined event function
+ must have a return value, the purpose of which is either to
+ control subsequent event propagation, or to otherwise alter
+ the operation in progress by the mapper. Possible return
+ values are:
+
+ * ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event
+ processing normally.
+ * ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent
+ event handlers in the chain.
+ * other values - the return value specified by specific listeners.
+
+ """
+
+ _target_class_doc = "SomeClass"
+ _dispatch_target = mapperlib.Mapper
+
+ @classmethod
+ def _new_mapper_instance(cls, class_, mapper):
+ _MapperEventsHold.populate(class_, mapper)
+
+ @classmethod
+ @util.preload_module("sqlalchemy.orm")
+ def _accept_with(cls, target):
+ orm = util.preloaded.orm
+
+ if target is orm.mapper:
+ return mapperlib.Mapper
+ elif isinstance(target, type):
+ if issubclass(target, mapperlib.Mapper):
+ return target
+ else:
+ mapper = _mapper_or_none(target)
+ if mapper is not None:
+ return mapper
+ else:
+ return _MapperEventsHold(target)
+ else:
+ return target
+
+ @classmethod
+ def _listen(
+ cls, event_key, raw=False, retval=False, propagate=False, **kw
+ ):
+ target, identifier, fn = (
+ event_key.dispatch_target,
+ event_key.identifier,
+ event_key._listen_fn,
+ )
+
+ if (
+ identifier in ("before_configured", "after_configured")
+ and target is not mapperlib.Mapper
+ ):
+ util.warn(
+ "'before_configured' and 'after_configured' ORM events "
+ "only invoke with the mapper() function or Mapper class "
+ "as the target."
+ )
+
+ if not raw or not retval:
+ if not raw:
+ meth = getattr(cls, identifier)
+ try:
+ target_index = (
+ inspect_getfullargspec(meth)[0].index("target") - 1
+ )
+ except ValueError:
+ target_index = None
+
+ def wrap(*arg, **kw):
+ if not raw and target_index is not None:
+ arg = list(arg)
+ arg[target_index] = arg[target_index].obj()
+ if not retval:
+ fn(*arg, **kw)
+ return interfaces.EXT_CONTINUE
+ else:
+ return fn(*arg, **kw)
+
+ event_key = event_key.with_wrapper(wrap)
+
+ if propagate:
+ for mapper in target.self_and_descendants:
+ event_key.with_dispatch_target(mapper).base_listen(
+ propagate=True, **kw
+ )
+ else:
+ event_key.base_listen(**kw)
+
+ @classmethod
+ def _clear(cls):
+ super(MapperEvents, cls)._clear()
+ _MapperEventsHold._clear()
+
+ def instrument_class(self, mapper, class_):
+ r"""Receive a class when the mapper is first constructed,
+ before instrumentation is applied to the mapped class.
+
+ This event is the earliest phase of mapper construction.
+ Most attributes of the mapper are not yet initialized.
+
+ This listener can either be applied to the :class:`_orm.Mapper`
+ class overall, or to any un-mapped class which serves as a base
+ for classes that will be mapped (using the ``propagate=True`` flag)::
+
+ Base = declarative_base()
+
+ @event.listens_for(Base, "instrument_class", propagate=True)
+ def on_new_class(mapper, cls_):
+ " ... "
+
+ :param mapper: the :class:`_orm.Mapper` which is the target
+ of this event.
+ :param class\_: the mapped class.
+
+ """
+
+ def before_mapper_configured(self, mapper, class_):
+ """Called right before a specific mapper is to be configured.
+
+ This event is intended to allow a specific mapper to be skipped during
+ the configure step, by returning the :attr:`.orm.interfaces.EXT_SKIP`
+ symbol which indicates to the :func:`.configure_mappers` call that this
+ particular mapper (or hierarchy of mappers, if ``propagate=True`` is
+ used) should be skipped in the current configuration run. When one or
+ more mappers are skipped, the "new mappers" flag will remain set,
+ meaning the :func:`.configure_mappers` function will continue to be
+ called when mappers are used, so that it keeps trying to configure
+ all available mappers.
+
+ In comparison to the other configure-level events,
+ :meth:`.MapperEvents.before_configured`,
+ :meth:`.MapperEvents.after_configured`, and
+ :meth:`.MapperEvents.mapper_configured`, the
+ :meth:`.MapperEvents.before_mapper_configured` event provides for a
+ meaningful return value when it is registered with the ``retval=True``
+ parameter.
+
+ .. versionadded:: 1.3
+
+ e.g.::
+
+ from sqlalchemy.orm import EXT_SKIP
+
+ Base = declarative_base()
+
+ DontConfigureBase = declarative_base()
+
+ @event.listens_for(
+ DontConfigureBase,
+ "before_mapper_configured", retval=True, propagate=True)
+ def dont_configure(mapper, cls):
+ return EXT_SKIP
+
+
+ .. seealso::
+
+ :meth:`.MapperEvents.before_configured`
+
+ :meth:`.MapperEvents.after_configured`
+
+ :meth:`.MapperEvents.mapper_configured`
+
+ """
+
+ def mapper_configured(self, mapper, class_):
+ r"""Called when a specific mapper has completed its own configuration
+ within the scope of the :func:`.configure_mappers` call.
+
+ The :meth:`.MapperEvents.mapper_configured` event is invoked
+ for each mapper that is encountered when the
+ :func:`_orm.configure_mappers` function proceeds through the current
+ list of not-yet-configured mappers.
+ :func:`_orm.configure_mappers` is typically invoked
+ automatically as mappings are first used, as well as each time
+ new mappers have been made available and new mapper use is
+ detected.
+
+ When the event is called, the mapper should be in its final
+ state, but **not including backrefs** that may be invoked from
+ other mappers; they might still be pending within the
+ configuration operation. Bidirectional relationships that
+ are instead configured via the
+ :paramref:`.orm.relationship.back_populates` argument
+ *will* be fully available, since this style of relationship does not
+ rely upon other possibly-not-configured mappers to know that they
+ exist.
+
+ For an event that is guaranteed to have **all** mappers ready
+ to go including backrefs that are defined only on other
+ mappings, use the :meth:`.MapperEvents.after_configured`
+ event; this event invokes only after all known mappings have been
+ fully configured.
+
+ The :meth:`.MapperEvents.mapper_configured` event, unlike
+ :meth:`.MapperEvents.before_configured` or
+ :meth:`.MapperEvents.after_configured`,
+ is called for each mapper/class individually, and the mapper is
+ passed to the event itself. It also is called exactly once for
+ a particular mapper. The event is therefore useful for
+ configurational steps that benefit from being invoked just once
+ on a specific mapper basis, which don't require that "backref"
+ configurations are necessarily ready yet.
+
+ :param mapper: the :class:`_orm.Mapper` which is the target
+ of this event.
+ :param class\_: the mapped class.
+
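+ A minimal sketch, registered against a declarative base with
+ ``propagate=True`` so that each mapped class is received as it is
+ configured (``Base`` is assumed to be a declarative base)::
+
+     @event.listens_for(Base, "mapper_configured", propagate=True)
+     def on_mapper_configured(mapper, class_):
+         print("configured mapper for: %s" % class_)
+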
+ .. seealso::
+
+ :meth:`.MapperEvents.before_configured`
+
+ :meth:`.MapperEvents.after_configured`
+
+ :meth:`.MapperEvents.before_mapper_configured`
+
+ """
+ # TODO: need coverage for this event
+
+ def before_configured(self):
+ """Called before a series of mappers have been configured.
+
+ The :meth:`.MapperEvents.before_configured` event is invoked
+ each time the :func:`_orm.configure_mappers` function is
+ invoked, before the function has done any of its work.
+ :func:`_orm.configure_mappers` is typically invoked
+ automatically as mappings are first used, as well as each time
+ new mappers have been made available and new mapper use is
+ detected.
+
+ This event can **only** be applied to the :class:`_orm.Mapper` class
+ or :func:`.mapper` function, and not to individual mappings or
+ mapped classes. It is only invoked for all mappings as a whole::
+
+ from sqlalchemy.orm import mapper
+
+ @event.listens_for(mapper, "before_configured")
+ def go():
+     ...
+
+ Contrast this event to :meth:`.MapperEvents.after_configured`,
+ which is invoked after the series of mappers has been configured,
+ as well as :meth:`.MapperEvents.before_mapper_configured`
+ and :meth:`.MapperEvents.mapper_configured`, which are both invoked
+ on a per-mapper basis.
+
+ Theoretically this event is called once per
+ application, but is actually called any time new mappers
+ are to be affected by a :func:`_orm.configure_mappers`
+ call. If new mappings are constructed after existing ones have
+ already been used, this event will likely be called again. To ensure
+ that a particular event is only called once and no further, the
+ ``once=True`` argument (new in 0.9.4) can be applied::
+
+ from sqlalchemy.orm import mapper
+
+ @event.listens_for(mapper, "before_configured", once=True)
+ def go():
+     ...
+
+
+ .. versionadded:: 0.9.3
+
+
+ .. seealso::
+
+ :meth:`.MapperEvents.before_mapper_configured`
+
+ :meth:`.MapperEvents.mapper_configured`
+
+ :meth:`.MapperEvents.after_configured`
+
+ """
+
+ def after_configured(self):
+ """Called after a series of mappers have been configured.
+
+ The :meth:`.MapperEvents.after_configured` event is invoked
+ each time the :func:`_orm.configure_mappers` function is
+ invoked, after the function has completed its work.
+ :func:`_orm.configure_mappers` is typically invoked
+ automatically as mappings are first used, as well as each time
+ new mappers have been made available and new mapper use is
+ detected.
+
+ Contrast this event to the :meth:`.MapperEvents.mapper_configured`
+ event, which is called on a per-mapper basis while the configuration
+ operation proceeds; unlike that event, when this event is invoked,
+ all cross-configurations (e.g. backrefs) will also have been made
+ available for any mappers that were pending.
+ Also contrast to :meth:`.MapperEvents.before_configured`,
+ which is invoked before the series of mappers has been configured.
+
+ This event can **only** be applied to the :class:`_orm.Mapper` class
+ or :func:`.mapper` function, and not to individual mappings or
+ mapped classes. It is only invoked for all mappings as a whole::
+
+ from sqlalchemy.orm import mapper
+
+ @event.listens_for(mapper, "after_configured")
+ def go():
+     ...
+
+ Theoretically this event is called once per
+ application, but is actually called any time new mappers
+ have been affected by a :func:`_orm.configure_mappers`
+ call. If new mappings are constructed after existing ones have
+ already been used, this event will likely be called again. To ensure
+ that a particular event is only called once and no further, the
+ ``once=True`` argument (new in 0.9.4) can be applied::
+
+ from sqlalchemy.orm import mapper
+
+ @event.listens_for(mapper, "after_configured", once=True)
+ def go():
+     ...
+
+ .. seealso::
+
+ :meth:`.MapperEvents.before_mapper_configured`
+
+ :meth:`.MapperEvents.mapper_configured`
+
+ :meth:`.MapperEvents.before_configured`
+
+ """
+
+ def before_insert(self, mapper, connection, target):
+ """Receive an object instance before an INSERT statement
+ is emitted corresponding to that instance.
+
+ This event is used to modify local, non-object related
+ attributes on the instance before an INSERT occurs, as well
+ as to emit additional SQL statements on the given
+ connection.
+
+ The event is often called for a batch of objects of the
+ same class before their INSERT statements are emitted at
+ once in a later step. In the extremely rare case that
+ this is not desirable, the :func:`.mapper` can be
+ configured with ``batch=False``, which will cause
+ batches of instances to be broken up into individual
+ (and more poorly performing) event->persist->event
+ steps.
+
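+ For example, a listener may stamp an audit column just before the
+ row is INSERTed (a minimal sketch; the ``created_at`` column and
+ ``SomeClass`` mapping are hypothetical)::
+
+     import datetime
+
+     @event.listens_for(SomeClass, "before_insert")
+     def stamp_created(mapper, connection, target):
+         # "created_at" is a hypothetical column on the mapped class
+         target.created_at = datetime.datetime.utcnow()
+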
+ .. warning::
+
+ Mapper-level flush events only allow **very limited operations**,
+ on attributes local to the row being operated upon only,
+ as well as allowing any SQL to be emitted on the given
+ :class:`_engine.Connection`. **Please read fully** the notes
+ at :ref:`session_persistence_mapper` for guidelines on using
+ these methods; generally, the :meth:`.SessionEvents.before_flush`
+ method should be preferred for general on-flush changes.
+
+ :param mapper: the :class:`_orm.Mapper` which is the target
+ of this event.
+ :param connection: the :class:`_engine.Connection` being used to
+ emit INSERT statements for this instance. This
+ provides a handle into the current transaction on the
+ target database specific to this instance.
+ :param target: the mapped instance being persisted. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :return: No return value is supported by this event.
+
+ .. seealso::
+
+ :ref:`session_persistence_events`
+
+ """
+
+ def after_insert(self, mapper, connection, target):
+ """Receive an object instance after an INSERT statement
+ is emitted corresponding to that instance.
+
+ This event is used to modify in-Python-only
+ state on the instance after an INSERT occurs, as well
+ as to emit additional SQL statements on the given
+ connection.
+
+ The event is often called for a batch of objects of the
+ same class after their INSERT statements have been
+ emitted at once in a previous step. In the extremely
+ rare case that this is not desirable, the
+ :func:`.mapper` can be configured with ``batch=False``,
+ which will cause batches of instances to be broken up
+ into individual (and more poorly performing)
+ event->persist->event steps.
+
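+ For example, a listener may record the newly generated primary key
+ in an audit table by emitting SQL on the given connection (a minimal
+ sketch; ``audit_table`` and ``SomeClass`` are hypothetical)::
+
+     @event.listens_for(SomeClass, "after_insert")
+     def audit_insert(mapper, connection, target):
+         # any SQL may be emitted on the flush connection
+         connection.execute(
+             audit_table.insert().values(
+                 obj_id=target.id, action="insert"
+             )
+         )
+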
+ .. warning::
+
+ Mapper-level flush events only allow **very limited operations**,
+ on attributes local to the row being operated upon only,
+ as well as allowing any SQL to be emitted on the given
+ :class:`_engine.Connection`. **Please read fully** the notes
+ at :ref:`session_persistence_mapper` for guidelines on using
+ these methods; generally, the :meth:`.SessionEvents.before_flush`
+ method should be preferred for general on-flush changes.
+
+ :param mapper: the :class:`_orm.Mapper` which is the target
+ of this event.
+ :param connection: the :class:`_engine.Connection` being used to
+ emit INSERT statements for this instance. This
+ provides a handle into the current transaction on the
+ target database specific to this instance.
+ :param target: the mapped instance being persisted. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :return: No return value is supported by this event.
+
+ .. seealso::
+
+ :ref:`session_persistence_events`
+
+ """
+
+ def before_update(self, mapper, connection, target):
+ """Receive an object instance before an UPDATE statement
+ is emitted corresponding to that instance.
+
+ This event is used to modify local, non-object related
+ attributes on the instance before an UPDATE occurs, as well
+ as to emit additional SQL statements on the given
+ connection.
+
+ This method is called for all instances that are
+ marked as "dirty", *even those which have no net changes
+ to their column-based attributes*. An object is marked
+ as dirty when any of its column-based attributes have a
+ "set attribute" operation called or when any of its
+ collections are modified. If, at update time, no
+ column-based attributes have any net changes, no UPDATE
+ statement will be issued. This means that an instance
+ being sent to :meth:`~.MapperEvents.before_update` is
+ *not* a guarantee that an UPDATE statement will be
+ issued, although you can affect the outcome here by
+ modifying attributes so that a net change in value does
+ exist.
+
+ To detect if the column-based attributes on the object have net
+ changes, and will therefore generate an UPDATE statement, use
+ ``object_session(instance).is_modified(instance,
+ include_collections=False)``.
+
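+ For example, a listener may stamp an ``updated_at`` column only when
+ a net change is present (a minimal sketch; the column name and
+ ``SomeClass`` mapping are hypothetical)::
+
+     import datetime
+
+     from sqlalchemy.orm import object_session
+
+     @event.listens_for(SomeClass, "before_update")
+     def stamp_updated(mapper, connection, target):
+         if object_session(target).is_modified(
+             target, include_collections=False
+         ):
+             target.updated_at = datetime.datetime.utcnow()
+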
+ The event is often called for a batch of objects of the
+ same class before their UPDATE statements are emitted at
+ once in a later step. In the extremely rare case that
+ this is not desirable, the :func:`.mapper` can be
+ configured with ``batch=False``, which will cause
+ batches of instances to be broken up into individual
+ (and more poorly performing) event->persist->event
+ steps.
+
+ .. warning::
+
+ Mapper-level flush events only allow **very limited operations**,
+ on attributes local to the row being operated upon only,
+ as well as allowing any SQL to be emitted on the given
+ :class:`_engine.Connection`. **Please read fully** the notes
+ at :ref:`session_persistence_mapper` for guidelines on using
+ these methods; generally, the :meth:`.SessionEvents.before_flush`
+ method should be preferred for general on-flush changes.
+
+ :param mapper: the :class:`_orm.Mapper` which is the target
+ of this event.
+ :param connection: the :class:`_engine.Connection` being used to
+ emit UPDATE statements for this instance. This
+ provides a handle into the current transaction on the
+ target database specific to this instance.
+ :param target: the mapped instance being persisted. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :return: No return value is supported by this event.
+
+ .. seealso::
+
+ :ref:`session_persistence_events`
+
+ """
+
+ def after_update(self, mapper, connection, target):
+ """Receive an object instance after an UPDATE statement
+ is emitted corresponding to that instance.
+
+ This event is used to modify in-Python-only
+ state on the instance after an UPDATE occurs, as well
+ as to emit additional SQL statements on the given
+ connection.
+
+ This method is called for all instances that are
+ marked as "dirty", *even those which have no net changes
+ to their column-based attributes*, and for which
+ no UPDATE statement has proceeded. An object is marked
+ as dirty when any of its column-based attributes have a
+ "set attribute" operation called or when any of its
+ collections are modified. If, at update time, no
+ column-based attributes have any net changes, no UPDATE
+ statement will be issued. This means that an instance
+ being sent to :meth:`~.MapperEvents.after_update` is
+ *not* a guarantee that an UPDATE statement has been
+ issued.
+
+ To detect if the column-based attributes on the object have net
+ changes, and therefore resulted in an UPDATE statement, use
+ ``object_session(instance).is_modified(instance,
+ include_collections=False)``.
+
+ The event is often called for a batch of objects of the
+ same class after their UPDATE statements have been emitted at
+ once in a previous step. In the extremely rare case that
+ this is not desirable, the :func:`.mapper` can be
+ configured with ``batch=False``, which will cause
+ batches of instances to be broken up into individual
+ (and more poorly performing) event->persist->event
+ steps.
+
+ .. warning::
+
+ Mapper-level flush events only allow **very limited operations**,
+ on attributes local to the row being operated upon only,
+ as well as allowing any SQL to be emitted on the given
+ :class:`_engine.Connection`. **Please read fully** the notes
+ at :ref:`session_persistence_mapper` for guidelines on using
+ these methods; generally, the :meth:`.SessionEvents.before_flush`
+ method should be preferred for general on-flush changes.
+
+ :param mapper: the :class:`_orm.Mapper` which is the target
+ of this event.
+ :param connection: the :class:`_engine.Connection` being used to
+ emit UPDATE statements for this instance. This
+ provides a handle into the current transaction on the
+ target database specific to this instance.
+ :param target: the mapped instance being persisted. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :return: No return value is supported by this event.
+
+ .. seealso::
+
+ :ref:`session_persistence_events`
+
+ """
+
+ def before_delete(self, mapper, connection, target):
+ """Receive an object instance before a DELETE statement
+ is emitted corresponding to that instance.
+
+ This event is used to emit additional SQL statements on
+ the given connection as well as to perform application
+ specific bookkeeping related to a deletion event.
+
+ The event is often called for a batch of objects of the
+ same class before their DELETE statements are emitted at
+ once in a later step.
+
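+ For example, a listener may copy key fields of the row into an
+ archive table before the DELETE proceeds (a minimal sketch;
+ ``archive_table`` and ``SomeClass`` are hypothetical)::
+
+     @event.listens_for(SomeClass, "before_delete")
+     def archive_row(mapper, connection, target):
+         connection.execute(
+             archive_table.insert().values(obj_id=target.id)
+         )
+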
+ .. warning::
+
+ Mapper-level flush events only allow **very limited operations**,
+ on attributes local to the row being operated upon only,
+ as well as allowing any SQL to be emitted on the given
+ :class:`_engine.Connection`. **Please read fully** the notes
+ at :ref:`session_persistence_mapper` for guidelines on using
+ these methods; generally, the :meth:`.SessionEvents.before_flush`
+ method should be preferred for general on-flush changes.
+
+ :param mapper: the :class:`_orm.Mapper` which is the target
+ of this event.
+ :param connection: the :class:`_engine.Connection` being used to
+ emit DELETE statements for this instance. This
+ provides a handle into the current transaction on the
+ target database specific to this instance.
+ :param target: the mapped instance being deleted. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :return: No return value is supported by this event.
+
+ .. seealso::
+
+ :ref:`session_persistence_events`
+
+ """
+
+ def after_delete(self, mapper, connection, target):
+ """Receive an object instance after a DELETE statement
+ has been emitted corresponding to that instance.
+
+ This event is used to emit additional SQL statements on
+ the given connection as well as to perform application
+ specific bookkeeping related to a deletion event.
+
+ The event is often called for a batch of objects of the
+ same class after their DELETE statements have been emitted at
+ once in a previous step.
+
+ .. warning::
+
+ Mapper-level flush events only allow **very limited operations**,
+ on attributes local to the row being operated upon only,
+ as well as allowing any SQL to be emitted on the given
+ :class:`_engine.Connection`. **Please read fully** the notes
+ at :ref:`session_persistence_mapper` for guidelines on using
+ these methods; generally, the :meth:`.SessionEvents.before_flush`
+ method should be preferred for general on-flush changes.
+
+ :param mapper: the :class:`_orm.Mapper` which is the target
+ of this event.
+ :param connection: the :class:`_engine.Connection` being used to
+ emit DELETE statements for this instance. This
+ provides a handle into the current transaction on the
+ target database specific to this instance.
+ :param target: the mapped instance being deleted. If
+ the event is configured with ``raw=True``, this will
+ instead be the :class:`.InstanceState` state-management
+ object associated with the instance.
+ :return: No return value is supported by this event.
+
+ .. seealso::
+
+ :ref:`session_persistence_events`
+
+ """
+
+
+class _MapperEventsHold(_EventsHold):
+ all_holds = weakref.WeakKeyDictionary()
+
+ def resolve(self, class_):
+ return _mapper_or_none(class_)
+
+ class HoldMapperEvents(_EventsHold.HoldEvents, MapperEvents):
+ pass
+
+ dispatch = event.dispatcher(HoldMapperEvents)
+
+
+_sessionevents_lifecycle_event_names = set()
+
+
+class SessionEvents(event.Events):
+ """Define events specific to :class:`.Session` lifecycle.
+
+ e.g.::
+
+ from sqlalchemy import event
+ from sqlalchemy.orm import sessionmaker
+
+ def my_before_commit(session):
+ print("before commit!")
+
+ Session = sessionmaker()
+
+ event.listen(Session, "before_commit", my_before_commit)
+
+ The :func:`~.event.listen` function will accept
+ :class:`.Session` objects as well as the return result
+ of :class:`~.sessionmaker()` and :class:`~.scoped_session()`.
+
+ Additionally, it accepts the :class:`.Session` class which
+ will apply listeners to all :class:`.Session` instances
+ globally.
+
+ :param raw=False: When True, the "target" argument passed
+ to applicable event listener functions that work on individual
+ objects will be the instance's :class:`.InstanceState` management
+ object, rather than the mapped instance itself.
+
+ .. versionadded:: 1.3.14
+
+ :param restore_load_context=False: Applies to the
+ :meth:`.SessionEvents.loaded_as_persistent` event. Restores the loader
+ context of the object when the event hook is complete, so that ongoing
+ eager load operations continue to target the object appropriately. A
+ warning is emitted if the object is moved to a new loader context from
+ within this event if this flag is not set.
+
+ .. versionadded:: 1.3.14
+
+ """
+
+ _target_class_doc = "SomeSessionClassOrObject"
+
+ _dispatch_target = Session
+
+ def _lifecycle_event(fn):
+ _sessionevents_lifecycle_event_names.add(fn.__name__)
+ return fn
+
+ @classmethod
+ def _accept_with(cls, target):
+ if isinstance(target, scoped_session):
+
+ target = target.session_factory
+ if not isinstance(target, sessionmaker) and (
+ not isinstance(target, type) or not issubclass(target, Session)
+ ):
+ raise exc.ArgumentError(
+ "Session event listen on a scoped_session "
+ "requires that its creation callable "
+ "is associated with the Session class."
+ )
+
+ if isinstance(target, sessionmaker):
+ return target.class_
+ elif isinstance(target, type):
+ if issubclass(target, scoped_session):
+ return Session
+ elif issubclass(target, Session):
+ return target
+ elif isinstance(target, Session):
+ return target
+ else:
+ # allows alternate SessionEvents-like-classes to be consulted
+ return event.Events._accept_with(target)
+
+ @classmethod
+ def _listen(cls, event_key, raw=False, restore_load_context=False, **kw):
+ is_instance_event = (
+ event_key.identifier in _sessionevents_lifecycle_event_names
+ )
+
+ if is_instance_event:
+ if not raw or restore_load_context:
+
+ fn = event_key._listen_fn
+
+ def wrap(session, state, *arg, **kw):
+ if not raw:
+ target = state.obj()
+ if target is None:
+ # existing behavior is that if the object is
+ # garbage collected, no event is emitted
+ return
+ else:
+ target = state
+ if restore_load_context:
+ runid = state.runid
+ try:
+ return fn(session, target, *arg, **kw)
+ finally:
+ if restore_load_context:
+ state.runid = runid
+
+ event_key = event_key.with_wrapper(wrap)
+
+ event_key.base_listen(**kw)
+
+ def do_orm_execute(self, orm_execute_state):
+ """Intercept statement executions that occur on behalf of an
+ ORM :class:`.Session` object.
+
+ This event is invoked for all top-level SQL statements invoked from the
+ :meth:`_orm.Session.execute` method, as well as related methods such as
+ :meth:`_orm.Session.scalars` and :meth:`_orm.Session.scalar`. As of
+ SQLAlchemy 1.4, all ORM queries emitted on behalf of a
+ :class:`_orm.Session` will flow through this method, so this event hook
+ provides the single point at which ORM queries of all types may be
+ intercepted before they are invoked, and additionally to replace their
+ execution with a different process.
+
+ .. note:: The :meth:`_orm.SessionEvents.do_orm_execute` event hook
+ is triggered **for ORM statement executions only**, meaning those
+ invoked via the :meth:`_orm.Session.execute` and similar methods on
+ the :class:`_orm.Session` object. It does **not** trigger for
+ statements that are invoked by SQLAlchemy Core only, i.e. statements
+ invoked directly using :meth:`_engine.Connection.execute` or
+ otherwise originating from an :class:`_engine.Engine` object without
+ any :class:`_orm.Session` involved. To intercept **all** SQL
+ executions regardless of whether the Core or ORM APIs are in use,
+ see the event hooks at
+ :class:`.ConnectionEvents`, such as
+ :meth:`.ConnectionEvents.before_execute` and
+ :meth:`.ConnectionEvents.before_cursor_execute`.
+
+ This event is a ``do_`` event, meaning it has the capability to replace
+ the operation that the :meth:`_orm.Session.execute` method normally
+ performs. The intended use for this includes sharding and
+ result-caching schemes which may seek to invoke the same statement
+ across multiple database connections, returning a result that is
+ merged from each of them, or which don't invoke the statement at all,
+ instead returning data from a cache.
+
+ This hook is intended to replace the use of the
+ ``Query._execute_and_instances`` method, which could be subclassed
+ prior to SQLAlchemy 1.4.
+
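+ A minimal sketch that applies an execution option to every ORM
+ SELECT statement (assuming ``Session`` is a configured
+ :class:`.sessionmaker` or :class:`.Session` class used as the event
+ target)::
+
+     from sqlalchemy import event
+
+     @event.listens_for(Session, "do_orm_execute")
+     def apply_select_options(orm_execute_state):
+         if orm_execute_state.is_select:
+             orm_execute_state.update_execution_options(
+                 populate_existing=True
+             )
+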
+ :param orm_execute_state: an instance of :class:`.ORMExecuteState`
+ which contains all information about the current execution, as well
+ as helper functions used to derive other commonly required
+ information. See that object for details.
+
+ .. seealso::
+
+ :ref:`session_execute_events` - top level documentation on how
+ to use :meth:`_orm.SessionEvents.do_orm_execute`
+
+ :class:`.ORMExecuteState` - the object passed to the
+ :meth:`_orm.SessionEvents.do_orm_execute` event which contains
+ all information about the statement to be invoked. It also
+ provides an interface to extend the current statement, options,
+ and parameters as well as an option that allows programmatic
+ invocation of the statement at any point.
+
+ :ref:`examples_session_orm_events` - includes examples of using
+ :meth:`_orm.SessionEvents.do_orm_execute`
+
+ :ref:`examples_caching` - an example of how to integrate
+ Dogpile caching with the ORM :class:`_orm.Session` making use
+ of the :meth:`_orm.SessionEvents.do_orm_execute` event hook.
+
+ :ref:`examples_sharding` - the Horizontal Sharding example /
+ extension relies upon the
+ :meth:`_orm.SessionEvents.do_orm_execute` event hook to invoke a
+ SQL statement on multiple backends and return a merged result.
+
+
+ .. versionadded:: 1.4
+
+ """
+
+ def after_transaction_create(self, session, transaction):
+ """Execute when a new :class:`.SessionTransaction` is created.
+
+ This event differs from :meth:`~.SessionEvents.after_begin`
+ in that it occurs for each :class:`.SessionTransaction`
+ overall, as opposed to when transactions are begun
+ on individual database connections. It is also invoked
+ for nested transactions and subtransactions, and is always
+ matched by a corresponding
+ :meth:`~.SessionEvents.after_transaction_end` event
+ (assuming normal operation of the :class:`.Session`).
+
+ :param session: the target :class:`.Session`.
+ :param transaction: the target :class:`.SessionTransaction`.
+
+ To detect if this is the outermost
+ :class:`.SessionTransaction`, as opposed to a "subtransaction" or a
+ SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
+ is ``None``::
+
+ @event.listens_for(session, "after_transaction_create")
+ def after_transaction_create(session, transaction):
+ if transaction.parent is None:
+ # work with top-level transaction
+
+ To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
+ :attr:`.SessionTransaction.nested` attribute::
+
+ @event.listens_for(session, "after_transaction_create")
+ def after_transaction_create(session, transaction):
+ if transaction.nested:
+ # work with SAVEPOINT transaction
+
+
+ .. seealso::
+
+ :class:`.SessionTransaction`
+
+ :meth:`~.SessionEvents.after_transaction_end`
+
+ """
+
+ def after_transaction_end(self, session, transaction):
+ """Execute when the span of a :class:`.SessionTransaction` ends.
+
+ This event differs from :meth:`~.SessionEvents.after_commit`
+ in that it corresponds to all :class:`.SessionTransaction`
+ objects in use, including those for nested transactions
+ and subtransactions, and is always matched by a corresponding
+ :meth:`~.SessionEvents.after_transaction_create` event.
+
+ :param session: the target :class:`.Session`.
+ :param transaction: the target :class:`.SessionTransaction`.
+
+ To detect if this is the outermost
+ :class:`.SessionTransaction`, as opposed to a "subtransaction" or a
+ SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
+ is ``None``::
+
+ @event.listens_for(session, "after_transaction_end")
+ def after_transaction_end(session, transaction):
+ if transaction.parent is None:
+ # work with top-level transaction
+
+ To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
+ :attr:`.SessionTransaction.nested` attribute::
+
+ @event.listens_for(session, "after_transaction_end")
+ def after_transaction_end(session, transaction):
+ if transaction.nested:
+ # work with SAVEPOINT transaction
+
+
+ .. seealso::
+
+ :class:`.SessionTransaction`
+
+ :meth:`~.SessionEvents.after_transaction_create`
+
+ """
+
+ def before_commit(self, session):
+ """Execute before commit is called.
+
+ .. note::
+
+ The :meth:`~.SessionEvents.before_commit` hook is *not* per-flush,
+ that is, the :class:`.Session` can emit SQL to the database
+ many times within the scope of a transaction.
+ For interception of these events, use the
+ :meth:`~.SessionEvents.before_flush`,
+ :meth:`~.SessionEvents.after_flush`, or
+ :meth:`~.SessionEvents.after_flush_postexec`
+ events.
+
+ :param session: The target :class:`.Session`.
+
+ .. seealso::
+
+ :meth:`~.SessionEvents.after_commit`
+
+ :meth:`~.SessionEvents.after_begin`
+
+ :meth:`~.SessionEvents.after_transaction_create`
+
+ :meth:`~.SessionEvents.after_transaction_end`
+
+ """
+
+ def after_commit(self, session):
+ """Execute after a commit has occurred.
+
+ .. note::
+
+ The :meth:`~.SessionEvents.after_commit` hook is *not* per-flush,
+ that is, the :class:`.Session` can emit SQL to the database
+ many times within the scope of a transaction.
+ For interception of these events, use the
+ :meth:`~.SessionEvents.before_flush`,
+ :meth:`~.SessionEvents.after_flush`, or
+ :meth:`~.SessionEvents.after_flush_postexec`
+ events.
+
+ .. note::
+
+ The :class:`.Session` is not in an active transaction
+ when the :meth:`~.SessionEvents.after_commit` event is invoked,
+ and therefore can not emit SQL. To emit SQL corresponding to
+ every transaction, use the :meth:`~.SessionEvents.before_commit`
+ event.
+
+ :param session: The target :class:`.Session`.
+
+ .. seealso::
+
+ :meth:`~.SessionEvents.before_commit`
+
+ :meth:`~.SessionEvents.after_begin`
+
+ :meth:`~.SessionEvents.after_transaction_create`
+
+ :meth:`~.SessionEvents.after_transaction_end`
+
+ """
+
+ def after_rollback(self, session):
+ """Execute after a real DBAPI rollback has occurred.
+
+ Note that this event only fires when the *actual* rollback against
+ the database occurs - it does *not* fire each time the
+ :meth:`.Session.rollback` method is called, if the underlying
+ DBAPI transaction has already been rolled back. In many
+ cases, the :class:`.Session` will not be in
+ an "active" state during this event, as the current
+ transaction is not valid. To acquire a :class:`.Session`
+ which is active after the outermost rollback has proceeded,
+ use the :meth:`.SessionEvents.after_soft_rollback` event, checking the
+ :attr:`.Session.is_active` flag.
+
+ :param session: The target :class:`.Session`.
+
+ """
+
+ def after_soft_rollback(self, session, previous_transaction):
+ """Execute after any rollback has occurred, including "soft"
+ rollbacks that don't actually emit at the DBAPI level.
+
+ This corresponds to both nested and outer rollbacks, i.e.
+ the innermost rollback that calls the DBAPI's
+ rollback() method, as well as the enclosing rollback
+ calls that only pop themselves from the transaction stack.
+
+ The given :class:`.Session` can be used to invoke SQL and
+ :meth:`.Session.query` operations after an outermost rollback
+ by first checking the :attr:`.Session.is_active` flag::
+
+ @event.listens_for(Session, "after_soft_rollback")
+ def do_something(session, previous_transaction):
+ if session.is_active:
+ session.execute("select * from some_table")
+
+ :param session: The target :class:`.Session`.
+ :param previous_transaction: The :class:`.SessionTransaction`
+ transactional marker object which was just closed. The current
+ :class:`.SessionTransaction` for the given :class:`.Session` is
+ available via the :attr:`.Session.transaction` attribute.
+
+ """
+
+ def before_flush(self, session, flush_context, instances):
+ """Execute before flush process has started.
+
+ :param session: The target :class:`.Session`.
+ :param flush_context: Internal :class:`.UOWTransaction` object
+ which handles the details of the flush.
+ :param instances: Usually ``None``, this is the collection of
+ objects which can be passed to the :meth:`.Session.flush` method
+ (note this usage is deprecated).
+
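+ A minimal sketch that scans objects newly added to the session
+ before they are INSERTed (``SomeClass`` and its ``validate()``
+ method are hypothetical)::
+
+     @event.listens_for(Session, "before_flush")
+     def check_new_objects(session, flush_context, instances):
+         for obj in session.new:
+             if isinstance(obj, SomeClass):
+                 obj.validate()
+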
+ .. seealso::
+
+ :meth:`~.SessionEvents.after_flush`
+
+ :meth:`~.SessionEvents.after_flush_postexec`
+
+ :ref:`session_persistence_events`
+
+ """
+
+ def after_flush(self, session, flush_context):
+ """Execute after flush has completed, but before commit has been
+ called.
+
+ Note that the session's state is still in pre-flush, i.e. 'new',
+ 'dirty', and 'deleted' lists still show pre-flush state as well
+ as the history settings on instance attributes.
+
+ .. warning:: This event runs after the :class:`.Session` has emitted
+ SQL to modify the database, but **before** it has altered its
+ internal state to reflect those changes, including that newly
+ inserted objects are placed into the identity map. ORM operations
+ emitted within this event such as loads of related items
+ may produce new identity map entries that will immediately
+ be replaced, sometimes causing confusing results. SQLAlchemy will
+ emit a warning for this condition as of version 1.3.9.
+
+ :param session: The target :class:`.Session`.
+ :param flush_context: Internal :class:`.UOWTransaction` object
+ which handles the details of the flush.
+
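+ A minimal sketch that inspects the pre-flush collections while they
+ are still available (the ``print`` calls are illustrative only)::
+
+     @event.listens_for(Session, "after_flush")
+     def log_flush(session, flush_context):
+         for obj in session.dirty:
+             print("flushed (dirty): %s" % obj)
+         for obj in session.deleted:
+             print("flushed (deleted): %s" % obj)
+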
+ .. seealso::
+
+ :meth:`~.SessionEvents.before_flush`
+
+ :meth:`~.SessionEvents.after_flush_postexec`
+
+ :ref:`session_persistence_events`
+
+ """
+
+ def after_flush_postexec(self, session, flush_context):
+ """Execute after flush has completed, and after the post-exec
+ state occurs.
+
+ This will be when the 'new', 'dirty', and 'deleted' lists are in
+ their final state. An actual commit() may or may not have
+ occurred, depending on whether or not the flush started its own
+ transaction or participated in a larger transaction.
+
+ :param session: The target :class:`.Session`.
+ :param flush_context: Internal :class:`.UOWTransaction` object
+ which handles the details of the flush.
+
+
+ .. seealso::
+
+ :meth:`~.SessionEvents.before_flush`
+
+ :meth:`~.SessionEvents.after_flush`
+
+ :ref:`session_persistence_events`
+
+ """
+
+ def after_begin(self, session, transaction, connection):
+ """Execute after a transaction is begun on a connection
+
+ :param session: The target :class:`.Session`.
+ :param transaction: The :class:`.SessionTransaction`.
+ :param connection: The :class:`_engine.Connection` object
+ which will be used for SQL statements.
+
+ .. seealso::
+
+ :meth:`~.SessionEvents.before_commit`
+
+ :meth:`~.SessionEvents.after_commit`
+
+ :meth:`~.SessionEvents.after_transaction_create`
+
+ :meth:`~.SessionEvents.after_transaction_end`
+
+ """
+
+ @_lifecycle_event
+ def before_attach(self, session, instance):
+ """Execute before an instance is attached to a session.
+
+ This is called before an add, delete or merge causes
+ the object to be part of the session.
+
+ .. seealso::
+
+ :meth:`~.SessionEvents.after_attach`
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ @_lifecycle_event
+ def after_attach(self, session, instance):
+ """Execute after an instance is attached to a session.
+
+ This is called after an add, delete or merge.
+
+ .. note::
+
+ As of 0.8, this event fires off *after* the item
+ has been fully associated with the session, which is
+ different than previous releases. For event
+ handlers that require the object not yet
+ be part of session state (such as handlers which
+ may autoflush while the target object is not
+ yet complete) consider the
+ new :meth:`.before_attach` event.
+
+ .. seealso::
+
+ :meth:`~.SessionEvents.before_attach`
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ @event._legacy_signature(
+ "0.9",
+ ["session", "query", "query_context", "result"],
+ lambda update_context: (
+ update_context.session,
+ update_context.query,
+ None,
+ update_context.result,
+ ),
+ )
+ def after_bulk_update(self, update_context):
+ """Execute after an ORM UPDATE against a WHERE expression has been
+ invoked.
+
+ This is called as a result of the :meth:`_query.Query.update` method.
+
+ :param update_context: an "update context" object which contains
+ details about the update, including these attributes:
+
+ * ``session`` - the :class:`.Session` involved
+ * ``query`` - the :class:`_query.Query`
+ object that this update operation
+ was called upon.
+ * ``values`` - the "values" dictionary that was passed to
+ :meth:`_query.Query.update`.
+ * ``result`` - the :class:`_engine.CursorResult`
+ returned as a result of the
+ bulk UPDATE operation.
+
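+ A minimal sketch that reports how many rows the bulk UPDATE matched
+ (assuming ``Session`` is a configured session class used as the
+ event target)::
+
+     @event.listens_for(Session, "after_bulk_update")
+     def report_bulk_update(update_context):
+         print("rows matched: %s" % update_context.result.rowcount)
+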
+ .. versionchanged:: 1.4 the update_context no longer has a
+ ``QueryContext`` object associated with it.
+
+ .. seealso::
+
+ :meth:`.QueryEvents.before_compile_update`
+
+ :meth:`.SessionEvents.after_bulk_delete`
+
+ """
+
+ @event._legacy_signature(
+ "0.9",
+ ["session", "query", "query_context", "result"],
+ lambda delete_context: (
+ delete_context.session,
+ delete_context.query,
+ None,
+ delete_context.result,
+ ),
+ )
+ def after_bulk_delete(self, delete_context):
+ """Execute after ORM DELETE against a WHERE expression has been
+ invoked.
+
+ This is called as a result of the :meth:`_query.Query.delete` method.
+
+ :param delete_context: a "delete context" object which contains
+ details about the update, including these attributes:
+
+ * ``session`` - the :class:`.Session` involved
+ * ``query`` -the :class:`_query.Query`
+ object that this update operation
+ was called upon.
+ * ``result`` the :class:`_engine.CursorResult`
+ returned as a result of the
+ bulk DELETE operation.
+
+ .. versionchanged:: 1.4 the delete_context no longer has a
+ ``QueryContext`` object associated with it.
+
+ .. seealso::
+
+ :meth:`.QueryEvents.before_compile_delete`
+
+ :meth:`.SessionEvents.after_bulk_update`
+
+ """
+
+ @_lifecycle_event
+ def transient_to_pending(self, session, instance):
+ """Intercept the "transient to pending" transition for a specific
+ object.
+
+ This event is a specialization of the
+ :meth:`.SessionEvents.after_attach` event which is only invoked
+ for this specific transition. It is invoked typically during the
+ :meth:`.Session.add` call.
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ @_lifecycle_event
+ def pending_to_transient(self, session, instance):
+ """Intercept the "pending to transient" transition for a specific
+ object.
+
+ This less common transition occurs when a pending object that has
+ not been flushed is evicted from the session; this can occur
+ when the :meth:`.Session.rollback` method rolls back the transaction,
+ or when the :meth:`.Session.expunge` method is used.
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ @_lifecycle_event
+ def persistent_to_transient(self, session, instance):
+ """Intercept the "persistent to transient" transition for a specific
+ object.
+
+ This less common transition occurs when a pending object that
+ has been flushed is evicted from the session; this can occur
+ when the :meth:`.Session.rollback` method rolls back the transaction.
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ @_lifecycle_event
+ def pending_to_persistent(self, session, instance):
+ """Intercept the "pending to persistent"" transition for a specific
+ object.
+
+ This event is invoked within the flush process, and is
+ similar to scanning the :attr:`.Session.new` collection within
+ the :meth:`.SessionEvents.after_flush` event. However, in this
+ case the object has already been moved to the persistent state
+ when the event is called.
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
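+ A minimal sketch, e.g. to log each object as it reaches the
+ persistent state during a flush::
+
+     @event.listens_for(Session, "pending_to_persistent")
+     def log_persistent(session, instance):
+         print("now persistent: %s" % instance)
+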
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ @_lifecycle_event
+ def detached_to_persistent(self, session, instance):
+ """Intercept the "detached to persistent" transition for a specific
+ object.
+
+ This event is a specialization of the
+ :meth:`.SessionEvents.after_attach` event which is only invoked
+ for this specific transition. It is invoked typically during the
+ :meth:`.Session.add` call, as well as during the
+ :meth:`.Session.delete` call if the object was not previously
+ associated with the
+ :class:`.Session` (note that an object marked as "deleted" remains
+ in the "persistent" state until the flush proceeds).
+
+ .. note::
+
+ If the object becomes persistent as part of a call to
+ :meth:`.Session.delete`, the object is **not** yet marked as
+ deleted when this event is called. To detect deleted objects,
+ check the ``deleted`` flag sent to the
+ :meth:`.SessionEvents.persistent_to_detached` event after the
+ flush proceeds, or check the :attr:`.Session.deleted` collection
+ within the :meth:`.SessionEvents.before_flush` event if deleted
+ objects need to be intercepted before the flush.
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ @_lifecycle_event
+ def loaded_as_persistent(self, session, instance):
+ """Intercept the "loaded as persistent" transition for a specific
+ object.
+
+ This event is invoked within the ORM loading process, and is invoked
+ very similarly to the :meth:`.InstanceEvents.load` event. However,
+ the event here is linkable to a :class:`.Session` class or instance,
+ rather than to a mapper or class hierarchy, and integrates
+ with the other session lifecycle events smoothly. The object
+ is guaranteed to be present in the session's identity map when
+ this event is called.
+
+ .. note:: This event is invoked within the loader process before
+ eager loaders may have been completed, and the object's state may
+ not be complete. Additionally, invoking row-level refresh
+ operations on the object will place the object into a new loader
+ context, interfering with the existing load context. See the note
+ on :meth:`.InstanceEvents.load` for background on making use of the
+ :paramref:`.SessionEvents.restore_load_context` parameter, which
+ works in the same manner as that of
+ :paramref:`.InstanceEvents.restore_load_context`, in order to
+ resolve this scenario.
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ @_lifecycle_event
+ def persistent_to_deleted(self, session, instance):
+ """Intercept the "persistent to deleted" transition for a specific
+ object.
+
+ This event is invoked when a persistent object's identity
+ is deleted from the database within a flush, however the object
+ still remains associated with the :class:`.Session` until the
+ transaction completes.
+
+ If the transaction is rolled back, the object moves again
+ to the persistent state, and the
+ :meth:`.SessionEvents.deleted_to_persistent` event is called.
+ If the transaction is committed, the object becomes detached,
+ which will emit the :meth:`.SessionEvents.deleted_to_detached`
+ event.
+
+ Note that while the :meth:`.Session.delete` method is the primary
+ public interface to mark an object as deleted, many objects
+ get deleted due to cascade rules, which are not always determined
+ until flush time. Therefore, there's no way to catch
+ every object that will be deleted until the flush has proceeded.
+ The :meth:`.SessionEvents.persistent_to_deleted` event is therefore
+ invoked at the end of a flush.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ @_lifecycle_event
+ def deleted_to_persistent(self, session, instance):
+ """Intercept the "deleted to persistent" transition for a specific
+ object.
+
+ This transition occurs only when an object that's been deleted
+ successfully in a flush is restored due to a call to
+ :meth:`.Session.rollback`. The event is not called under
+ any other circumstances.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ @_lifecycle_event
+ def deleted_to_detached(self, session, instance):
+ """Intercept the "deleted to detached" transition for a specific
+ object.
+
+ This event is invoked when a deleted object is evicted
+ from the session. The typical case when this occurs is when
+ the transaction for a :class:`.Session` in which the object
+ was deleted is committed; the object moves from the deleted
+ state to the detached state.
+
+ It is also invoked for objects that were deleted in a flush
+ when the :meth:`.Session.expunge_all` or :meth:`.Session.close`
+ methods are called, as well as if the object is individually
+ expunged from its deleted state via :meth:`.Session.expunge`.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+ @_lifecycle_event
+ def persistent_to_detached(self, session, instance):
+ """Intercept the "persistent to detached" transition for a specific
+ object.
+
+ This event is invoked when a persistent object is evicted
+ from the session. There are many conditions that cause this
+ to happen, including:
+
+ * using a method such as :meth:`.Session.expunge`
+ or :meth:`.Session.close`
+
+ * Calling the :meth:`.Session.rollback` method, when the object
+ was part of an INSERT statement for that session's transaction
+
+
+ :param session: target :class:`.Session`
+
+ :param instance: the ORM-mapped instance being operated upon.
+
+ :param deleted: boolean. If True, indicates this object moved
+ to the detached state because it was marked as deleted and flushed.
+
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_lifecycle_events`
+
+ """
+
+
+class AttributeEvents(event.Events):
+ r"""Define events for object attributes.
+
+ These are typically defined on the class-bound descriptor for the
+ target class.
+
+ For example, to register a listener that will receive the
+ :meth:`_orm.AttributeEvents.append` event::
+
+ from sqlalchemy import event
+
+ @event.listens_for(MyClass.collection, 'append', propagate=True)
+ def my_append_listener(target, value, initiator):
+ print("received append event for target: %s" % target)
+
+
+ Listeners have the option to return a possibly modified version of the
+ value, when the :paramref:`.AttributeEvents.retval` flag is passed to
+ :func:`.event.listen` or :func:`.event.listens_for`, such as below,
+ illustrated using the :meth:`_orm.AttributeEvents.set` event::
+
+ def validate_phone(target, value, oldvalue, initiator):
+ "Strip non-numeric characters from a phone number"
+
+ return re.sub(r'\D', '', value)
+
+ # setup listener on UserContact.phone attribute, instructing
+ # it to use the return value
+ listen(UserContact.phone, 'set', validate_phone, retval=True)
+
+ A validation function like the above can also raise an exception
+ such as :exc:`ValueError` to halt the operation.
+
+ The :paramref:`.AttributeEvents.propagate` flag is also important when
+ applying listeners to mapped classes that also have mapped subclasses,
+ as when using mapper inheritance patterns::
+
+
+ @event.listens_for(MySuperClass.attr, 'set', propagate=True)
+ def receive_set(target, value, oldvalue, initiator):
+ print("value set: %s" % target)
+
+ The full list of modifiers available to the :func:`.event.listen`
+ and :func:`.event.listens_for` functions are below.
+
+ :param active_history=False: When True, indicates that the
+ "set" event would like to receive the "old" value being
+ replaced unconditionally, even if this requires firing off
+ database loads. Note that ``active_history`` can also be
+ set directly via :func:`.column_property` and
+ :func:`_orm.relationship`.
+
+ :param propagate=False: When True, the listener function will
+ be established not just for the class attribute given, but
+ for attributes of the same name on all current subclasses
+ of that class, as well as all future subclasses of that
+ class, using an additional listener that listens for
+ instrumentation events.
+ :param raw=False: When True, the "target" argument to the
+ event will be the :class:`.InstanceState` management
+ object, rather than the mapped instance itself.
+ :param retval=False: when True, the user-defined event
+ listening must return the "value" argument from the
+ function. This gives the listening function the opportunity
+ to change the value that is ultimately used for a "set"
+ or "append" event.
+
+ """
+
+ _target_class_doc = "SomeClass.some_attribute"
+ _dispatch_target = QueryableAttribute
+
+ @staticmethod
+ def _set_dispatch(cls, dispatch_cls):
+ dispatch = event.Events._set_dispatch(cls, dispatch_cls)
+ dispatch_cls._active_history = False
+ return dispatch
+
+ @classmethod
+ def _accept_with(cls, target):
+ # TODO: coverage
+ if isinstance(target, interfaces.MapperProperty):
+ return getattr(target.parent.class_, target.key)
+ else:
+ return target
+
+ @classmethod
+ def _listen(
+ cls,
+ event_key,
+ active_history=False,
+ raw=False,
+ retval=False,
+ propagate=False,
+ ):
+
+ target, fn = event_key.dispatch_target, event_key._listen_fn
+
+ if active_history:
+ target.dispatch._active_history = True
+
+ if not raw or not retval:
+
+ def wrap(target, *arg):
+ if not raw:
+ target = target.obj()
+ if not retval:
+ if arg:
+ value = arg[0]
+ else:
+ value = None
+ fn(target, *arg)
+ return value
+ else:
+ return fn(target, *arg)
+
+ event_key = event_key.with_wrapper(wrap)
+
+ event_key.base_listen(propagate=propagate)
+
+ if propagate:
+ manager = instrumentation.manager_of_class(target.class_)
+
+ for mgr in manager.subclass_managers(True):
+ event_key.with_dispatch_target(mgr[target.key]).base_listen(
+ propagate=True
+ )
+ if active_history:
+ mgr[target.key].dispatch._active_history = True
+
+ def append(self, target, value, initiator):
+ """Receive a collection append event.
+
+ The append event is invoked for each element as it is appended
+ to the collection. This occurs for single-item appends as well
+ as for a "bulk replace" operation.
+
+ :param target: the object instance receiving the event.
+ If the listener is registered with ``raw=True``, this will
+ be the :class:`.InstanceState` object.
+ :param value: the value being appended. If this listener
+ is registered with ``retval=True``, the listener
+ function must return this value, or a new value which
+ replaces it.
+ :param initiator: An instance of :class:`.attributes.Event`
+ representing the initiation of the event. May be modified
+ from its original value by backref handlers in order to control
+ chained event propagation, as well as be inspected for information
+ about the source of the event.
+ :return: if the event was registered with ``retval=True``,
+ the given value, or a new effective value, should be returned.
+
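+ A minimal sketch that coerces appended values, registered with
+ ``retval=True`` (``SomeClass.collection`` and ``coerce_value()``
+ are hypothetical)::
+
+     @event.listens_for(SomeClass.collection, "append", retval=True)
+     def coerce_append(target, value, initiator):
+         return coerce_value(value)
+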
+ .. seealso::
+
+ :class:`.AttributeEvents` - background on listener options such
+ as propagation to subclasses.
+
+ :meth:`.AttributeEvents.bulk_replace`
+
+ """
+
+ def append_wo_mutation(self, target, value, initiator):
+ """Receive a collection append event where the collection was not
+ actually mutated.
+
+ This event differs from :meth:`_orm.AttributeEvents.append` in that
+ it is fired off for de-duplicating collections such as sets and
+ dictionaries, when the object already exists in the target collection.
+ The event does not have a return value and the identity of the
+ given object cannot be changed.
+
+ The event is used for cascading objects into a :class:`_orm.Session`
+ when the collection has already been mutated via a backref event.
+
+ :param target: the object instance receiving the event.
+ If the listener is registered with ``raw=True``, this will
+ be the :class:`.InstanceState` object.
+ :param value: the value that would be appended if the object did not
+ already exist in the collection.
+ :param initiator: An instance of :class:`.attributes.Event`
+ representing the initiation of the event. May be modified
+ from its original value by backref handlers in order to control
+ chained event propagation, as well as be inspected for information
+ about the source of the event.
+
+ :return: No return value is defined for this event.
+
+ .. versionadded:: 1.4.15
+
+ """
+
+ def bulk_replace(self, target, values, initiator):
+ """Receive a collection 'bulk replace' event.
+
+ This event is invoked for a sequence of values as they are incoming
+ to a bulk collection set operation, which can be
+ modified in place before the values are treated as ORM objects.
+ This is an "early hook" that runs before the bulk replace routine
+ attempts to reconcile which objects are already present in the
+ collection and which are being removed by the net replace operation.
+
+ It is typical that this method be combined with use of the
+ :meth:`.AttributeEvents.append` event. When using both of these
+ events, note that a bulk replace operation will invoke
+ the :meth:`.AttributeEvents.append` event for all new items,
+ even after :meth:`.AttributeEvents.bulk_replace` has been invoked
+ for the collection as a whole. In order to determine if an
+ :meth:`.AttributeEvents.append` event is part of a bulk replace,
+ use the symbol :attr:`~.attributes.OP_BULK_REPLACE` to test the
+ incoming initiator::
+
+ from sqlalchemy.orm.attributes import OP_BULK_REPLACE
+
+ @event.listens_for(SomeObject.collection, "bulk_replace")
+ def process_collection(target, values, initiator):
+ values[:] = [_make_value(value) for value in values]
+
+ @event.listens_for(SomeObject.collection, "append", retval=True)
+ def process_collection(target, value, initiator):
+ # make sure bulk_replace didn't already do it
+ if initiator is None or initiator.op is not OP_BULK_REPLACE:
+ return _make_value(value)
+ else:
+ return value
+
+ .. versionadded:: 1.2
+
+ :param target: the object instance receiving the event.
+ If the listener is registered with ``raw=True``, this will
+ be the :class:`.InstanceState` object.
+ :param values: a sequence (e.g. a list) of the values being set. The
+ handler can modify this list in place.
+ :param initiator: An instance of :class:`.attributes.Event`
+ representing the initiation of the event.
+
+ .. seealso::
+
+ :class:`.AttributeEvents` - background on listener options such
+ as propagation to subclasses.
+
+
+ """
+
+ def remove(self, target, value, initiator):
+ """Receive a collection remove event.
+
+ :param target: the object instance receiving the event.
+ If the listener is registered with ``raw=True``, this will
+ be the :class:`.InstanceState` object.
+ :param value: the value being removed.
+ :param initiator: An instance of :class:`.attributes.Event`
+ representing the initiation of the event. May be modified
+ from its original value by backref handlers in order to control
+ chained event propagation.
+
+ .. versionchanged:: 0.9.0 the ``initiator`` argument is now
+ passed as a :class:`.attributes.Event` object, and may be
+ modified by backref handlers within a chain of backref-linked
+ events.
+
+ :return: No return value is defined for this event.
+
+
+ .. seealso::
+
+ :class:`.AttributeEvents` - background on listener options such
+ as propagation to subclasses.
+
+ """
+
+ def set(self, target, value, oldvalue, initiator):
+ """Receive a scalar set event.
+
+ :param target: the object instance receiving the event.
+ If the listener is registered with ``raw=True``, this will
+ be the :class:`.InstanceState` object.
+ :param value: the value being set. If this listener
+ is registered with ``retval=True``, the listener
+ function must return this value, or a new value which
+ replaces it.
+ :param oldvalue: the previous value being replaced. This
+ may also be the symbol ``NEVER_SET`` or ``NO_VALUE``.
+ If the listener is registered with ``active_history=True``,
+ the previous value of the attribute will be loaded from
+ the database if the existing value is currently unloaded
+ or expired.
+ :param initiator: An instance of :class:`.attributes.Event`
+ representing the initiation of the event. May be modified
+ from its original value by backref handlers in order to control
+ chained event propagation.
+
+ .. versionchanged:: 0.9.0 the ``initiator`` argument is now
+ passed as a :class:`.attributes.Event` object, and may be
+ modified by backref handlers within a chain of backref-linked
+ events.
+
+ :return: if the event was registered with ``retval=True``,
+ the given value, or a new effective value, should be returned.
+
+ .. seealso::
+
+ :class:`.AttributeEvents` - background on listener options such
+ as propagation to subclasses.
+
+ """
+
+ def init_scalar(self, target, value, dict_):
+ r"""Receive a scalar "init" event.
+
+ This event is invoked when an uninitialized, unpersisted scalar
+ attribute is accessed, e.g. read::
+
+
+ x = my_object.some_attribute
+
+ The ORM's default behavior when this occurs for an un-initialized
+ attribute is to return the value ``None``; note this differs from
+ Python's usual behavior of raising ``AttributeError``. The
+ event here can be used to customize what value is actually returned,
+ with the assumption that the event listener would be mirroring
+ a default generator that is configured on the Core
+ :class:`_schema.Column`
+ object as well.
+
+ Since a default generator on a :class:`_schema.Column`
+ might also produce
+ a changing value such as a timestamp, the
+ :meth:`.AttributeEvents.init_scalar`
+ event handler can also be used to **set** the newly returned value, so
+ that a Core-level default generation function effectively fires off
+ only once, but at the moment the attribute is accessed on the
+ non-persisted object. Normally, no change to the object's state
+ is made when an uninitialized attribute is accessed (much older
+ SQLAlchemy versions did in fact change the object's state).
+
+ If a default generator on a column returned a particular constant,
+ a handler might be used as follows::
+
+ SOME_CONSTANT = 3.1415926
+
+ class MyClass(Base):
+ # ...
+
+ some_attribute = Column(Numeric, default=SOME_CONSTANT)
+
+ @event.listens_for(
+ MyClass.some_attribute, "init_scalar",
+ retval=True, propagate=True)
+ def _init_some_attribute(target, value, dict_):
+ dict_['some_attribute'] = SOME_CONSTANT
+ return SOME_CONSTANT
+
+ Above, we initialize the attribute ``MyClass.some_attribute`` to the
+ value of ``SOME_CONSTANT``. The above code includes the following
+ features:
+
+ * By setting the value ``SOME_CONSTANT`` in the given ``dict_``,
+ we indicate that this value is to be persisted to the database.
+ This supersedes the use of ``SOME_CONSTANT`` in the default generator
+ for the :class:`_schema.Column`. The ``active_column_defaults.py``
+ example given at :ref:`examples_instrumentation` illustrates using
+ the same approach for a changing default, e.g. a timestamp
+ generator. In this particular example, it is not strictly
+ necessary to do this since ``SOME_CONSTANT`` would be part of the
+ INSERT statement in either case.
+
+ * By establishing the ``retval=True`` flag, the value we return
+ from the function will be returned by the attribute getter.
+ Without this flag, the event is assumed to be a passive observer
+ and the return value of our function is ignored.
+
+ * The ``propagate=True`` flag is significant if the mapped class
+ includes inheriting subclasses, which would also make use of this
+ event listener. Without this flag, an inheriting subclass will
+ not use our event handler.
+
+ In the above example, the attribute set event
+ :meth:`.AttributeEvents.set` as well as the related validation feature
+ provided by :obj:`_orm.validates` is **not** invoked when we apply our
+ value to the given ``dict_``. To have these events invoked in
+ response to our newly generated value, apply the value to the given
+ object as a normal attribute set operation::
+
+ SOME_CONSTANT = 3.1415926
+
+ @event.listens_for(
+ MyClass.some_attribute, "init_scalar",
+ retval=True, propagate=True)
+ def _init_some_attribute(target, value, dict_):
+ # will also fire off attribute set events
+ target.some_attribute = SOME_CONSTANT
+ return SOME_CONSTANT
+
+ When multiple listeners are set up, the generation of the value
+ is "chained" from one listener to the next by passing the value
+ returned by the previous listener that specifies ``retval=True``
+ as the ``value`` argument of the next listener.
+
+ .. versionadded:: 1.1
+
+ :param target: the object instance receiving the event.
+ If the listener is registered with ``raw=True``, this will
+ be the :class:`.InstanceState` object.
+ :param value: the value that is to be returned before this event
+ listener was invoked. This value begins as the value ``None``,
+ however will be the return value of the previous event handler
+ function if multiple listeners are present.
+ :param dict\_: the attribute dictionary of this mapped object.
+ This is normally the ``__dict__`` of the object, but in all cases
+ represents the destination that the attribute system uses to get
+ at the actual value of this attribute. Placing the value in this
+ dictionary has the effect that the value will be used in the
+ INSERT statement generated by the unit of work.
+
+
+ .. seealso::
+
+ :meth:`.AttributeEvents.init_collection` - collection version
+ of this event
+
+ :class:`.AttributeEvents` - background on listener options such
+ as propagation to subclasses.
+
+ :ref:`examples_instrumentation` - see the
+ ``active_column_defaults.py`` example.
+
+ """
+
+ def init_collection(self, target, collection, collection_adapter):
+ """Receive a 'collection init' event.
+
+ This event is triggered for a collection-based attribute, when
+ the initial "empty collection" is first generated for a blank
+ attribute, as well as for when the collection is replaced with
+ a new one, such as via a set event.
+
+ E.g., given that ``User.addresses`` is a relationship-based
+ collection, the event is triggered here::
+
+ u1 = User()
+ u1.addresses.append(a1) # <- new collection
+
+ and also during replace operations::
+
+ u1.addresses = [a2, a3] # <- new collection
+
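+ A sketch of a listener for this event, continuing the
+ ``User.addresses`` example above::
+
+     from sqlalchemy import event
+
+     @event.listens_for(User.addresses, "init_collection")
+     def on_init_collection(target, collection, collection_adapter):
+         # the newly created collection is always empty at this point
+         print("new collection %r for %r" % (collection, target))
+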
+ :param target: the object instance receiving the event.
+ If the listener is registered with ``raw=True``, this will
+ be the :class:`.InstanceState` object.
+ :param collection: the new collection. This will always be generated
+ from what was specified as
+ :paramref:`_orm.relationship.collection_class`, and will always
+ be empty.
+ :param collection_adapter: the :class:`.CollectionAdapter` that will
+ mediate internal access to the collection.
+
+ .. versionadded:: 1.0.0 :meth:`.AttributeEvents.init_collection`
+ and :meth:`.AttributeEvents.dispose_collection` events.
+
+ .. seealso::
+
+ :class:`.AttributeEvents` - background on listener options such
+ as propagation to subclasses.
+
+ :meth:`.AttributeEvents.init_scalar` - "scalar" version of this
+ event.
+
+ """
+
+ def dispose_collection(self, target, collection, collection_adapter):
+ """Receive a 'collection dispose' event.
+
+ This event is triggered for a collection-based attribute when
+ a collection is replaced, that is::
+
+ u1.addresses.append(a1)
+
+ u1.addresses = [a2, a3] # <- old collection is disposed
+
+ The old collection received will contain its previous contents.
+
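+ A sketch of a listener, continuing the ``User.addresses`` example::
+
+     from sqlalchemy import event
+
+     @event.listens_for(User.addresses, "dispose_collection")
+     def on_dispose(target, collection, collection_adapter):
+         # "collection" still contains the replaced members
+         print("disposed collection with %d members" % len(collection))
+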
+ .. versionchanged:: 1.2 The collection passed to
+ :meth:`.AttributeEvents.dispose_collection` will now have its
+ contents before the dispose intact; previously, the collection
+ would be empty.
+
+ .. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
+ and :meth:`.AttributeEvents.dispose_collection` events.
+
+ .. seealso::
+
+ :class:`.AttributeEvents` - background on listener options such
+ as propagation to subclasses.
+
+ """
+
+ def modified(self, target, initiator):
+ """Receive a 'modified' event.
+
+ This event is triggered when the :func:`.attributes.flag_modified`
+ function is used to trigger a modify event on an attribute without
+ any specific value being set.
+
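+ A minimal sketch, assuming a mapped class ``User`` with an attribute
+ ``User.data`` and an instance ``u1`` (hypothetical names)::
+
+     from sqlalchemy import event
+     from sqlalchemy.orm.attributes import flag_modified
+
+     @event.listens_for(User.data, "modified")
+     def on_modified(target, initiator):
+         print("%r was flagged as modified" % target)
+
+     flag_modified(u1, "data")  # invokes the listener above
+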
+ .. versionadded:: 1.2
+
+ :param target: the object instance receiving the event.
+ If the listener is registered with ``raw=True``, this will
+ be the :class:`.InstanceState` object.
+
+ :param initiator: An instance of :class:`.attributes.Event`
+ representing the initiation of the event.
+
+ .. seealso::
+
+ :class:`.AttributeEvents` - background on listener options such
+ as propagation to subclasses.
+
+ """
+
+
+class QueryEvents(event.Events):
+ """Represent events within the construction of a :class:`_query.Query`
+ object.
+
+ The :class:`_orm.QueryEvents` hooks are now superseded by the
+ :meth:`_orm.SessionEvents.do_orm_execute` event hook.
+
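+ For example, global filtering criteria that were previously applied
+ via :meth:`_orm.QueryEvents.before_compile` may instead be applied
+ with :meth:`_orm.SessionEvents.do_orm_execute`; a minimal sketch,
+ assuming a mapped class ``User`` with a ``deleted`` column::
+
+     from sqlalchemy import event
+     from sqlalchemy.orm import Session, with_loader_criteria
+
+     @event.listens_for(Session, "do_orm_execute")
+     def _no_deleted(execute_state):
+         if execute_state.is_select:
+             execute_state.statement = execute_state.statement.options(
+                 with_loader_criteria(User, User.deleted == False)
+             )
+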
+ """
+
+ _target_class_doc = "SomeQuery"
+ _dispatch_target = Query
+
+ def before_compile(self, query):
+ """Receive the :class:`_query.Query`
+ object before it is composed into a
+ core :class:`_expression.Select` object.
+
+ .. deprecated:: 1.4 The :meth:`_orm.QueryEvents.before_compile` event
+ is superseded by the much more capable
+ :meth:`_orm.SessionEvents.do_orm_execute` hook. In version 1.4,
+ the :meth:`_orm.QueryEvents.before_compile` event is **no longer
+ used** for ORM-level attribute loads, such as loads of deferred
+ or expired attributes as well as relationship loaders. See the
+ new examples in :ref:`examples_session_orm_events` which
+ illustrate new ways of intercepting and modifying ORM queries
+ for the most common purpose of adding arbitrary filter criteria.
+
+
+ This event is intended to allow changes to the given query::
+
+ @event.listens_for(Query, "before_compile", retval=True)
+ def no_deleted(query):
+ for desc in query.column_descriptions:
+ if desc['type'] is User:
+ entity = desc['entity']
+ query = query.filter(entity.deleted == False)
+ return query
+
+ The event should normally be listened with the ``retval=True``
+ parameter set, so that the modified query may be returned.
+
+ The :meth:`.QueryEvents.before_compile` event by default
+ will disallow "baked" queries from caching a query, if the event
+ hook returns a new :class:`_query.Query` object.
+ This affects both direct
+ use of the baked query extension as well as its operation within
+ lazy loaders and eager loaders for relationships. In order to
+ re-establish the query being cached, apply the event adding the
+ ``bake_ok`` flag::
+
+ @event.listens_for(
+ Query, "before_compile", retval=True, bake_ok=True)
+ def my_event(query):
+ for desc in query.column_descriptions:
+ if desc['type'] is User:
+ entity = desc['entity']
+ query = query.filter(entity.deleted == False)
+ return query
+
+ When ``bake_ok`` is set to True, the event hook will only be invoked
+ once, and not called for subsequent invocations of a particular query
+ that is being cached.
+
+ .. versionadded:: 1.3.11 - added the "bake_ok" flag to the
+ :meth:`.QueryEvents.before_compile` event and disallowed caching via
+ the "baked" extension from occurring for event handlers that
+ return a new :class:`_query.Query` object if this flag is not set.
+
+ .. seealso::
+
+ :meth:`.QueryEvents.before_compile_update`
+
+ :meth:`.QueryEvents.before_compile_delete`
+
+ :ref:`baked_with_before_compile`
+
+ """
+
+ def before_compile_update(self, query, update_context):
+ """Allow modifications to the :class:`_query.Query` object within
+ :meth:`_query.Query.update`.
+
+ .. deprecated:: 1.4 The :meth:`_orm.QueryEvents.before_compile_update`
+ event is superseded by the much more capable
+ :meth:`_orm.SessionEvents.do_orm_execute` hook.
+
+ Like the :meth:`.QueryEvents.before_compile` event, if the event
+ is to be used to alter the :class:`_query.Query` object, it should
+ be configured with ``retval=True``, and the modified
+ :class:`_query.Query` object returned, as in ::
+
+ @event.listens_for(Query, "before_compile_update", retval=True)
+ def no_deleted(query, update_context):
+ for desc in query.column_descriptions:
+ if desc['type'] is User:
+ entity = desc['entity']
+ query = query.filter(entity.deleted == False)
+
+ update_context.values['timestamp'] = datetime.utcnow()
+ return query
+
+ The ``.values`` dictionary of the "update context" object can also
+ be modified in place as illustrated above.
+
+ :param query: a :class:`_query.Query` instance; this is also
+ the ``.query`` attribute of the given "update context"
+ object.
+
+ :param update_context: an "update context" object which is
+ the same kind of object as described in
+ :paramref:`.QueryEvents.after_bulk_update.update_context`.
+ The object has a ``.values`` attribute in an UPDATE context which is
+ the dictionary of parameters passed to :meth:`_query.Query.update`. This
+ dictionary can be modified to alter the VALUES clause of the
+ resulting UPDATE statement.
+
+ .. versionadded:: 1.2.17
+
+ .. seealso::
+
+ :meth:`.QueryEvents.before_compile`
+
+ :meth:`.QueryEvents.before_compile_delete`
+
+
+ """
+
+ def before_compile_delete(self, query, delete_context):
+ """Allow modifications to the :class:`_query.Query` object within
+ :meth:`_query.Query.delete`.
+
+ .. deprecated:: 1.4 The :meth:`_orm.QueryEvents.before_compile_delete`
+ event is superseded by the much more capable
+ :meth:`_orm.SessionEvents.do_orm_execute` hook.
+
+ Like the :meth:`.QueryEvents.before_compile` event, this event
+ should be configured with ``retval=True``, and the modified
+ :class:`_query.Query` object returned, as in ::
+
+ @event.listens_for(Query, "before_compile_delete", retval=True)
+ def no_deleted(query, delete_context):
+ for desc in query.column_descriptions:
+ if desc['type'] is User:
+ entity = desc['entity']
+ query = query.filter(entity.deleted == False)
+ return query
+
+ :param query: a :class:`_query.Query` instance; this is also
+ the ``.query`` attribute of the given "delete context"
+ object.
+
+ :param delete_context: a "delete context" object which is
+ the same kind of object as described in
+ :paramref:`.QueryEvents.after_bulk_delete.delete_context`.
+
+ .. versionadded:: 1.2.17
+
+ .. seealso::
+
+ :meth:`.QueryEvents.before_compile`
+
+ :meth:`.QueryEvents.before_compile_update`
+
+
+ """
+
+ @classmethod
+ def _listen(cls, event_key, retval=False, bake_ok=False, **kw):
+ fn = event_key._listen_fn
+
+ if not retval:
+
+ def wrap(*arg, **kw):
+ # the listener's return value is ignored when retval=False;
+ # return the original Query so the event chain is unbroken
+ query = arg[0]
+ fn(*arg, **kw)
+ return query
+
+ event_key = event_key.with_wrapper(wrap)
+ else:
+ # wrap, so that _bake_ok can be set without assuming an
+ # attribute can be applied to the user's callable
+ def wrap(*arg, **kw):
+ return fn(*arg, **kw)
+
+ event_key = event_key.with_wrapper(wrap)
+
+ wrap._bake_ok = bake_ok
+
+ event_key.base_listen(**kw)
diff --git a/lib/sqlalchemy/orm/exc.py b/lib/sqlalchemy/orm/exc.py
new file mode 100644
index 0000000..8dd4d90
--- /dev/null
+++ b/lib/sqlalchemy/orm/exc.py
@@ -0,0 +1,204 @@
+# orm/exc.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""SQLAlchemy ORM exceptions."""
+from .. import exc as sa_exc
+from .. import util
+from ..exc import MultipleResultsFound # noqa
+from ..exc import NoResultFound # noqa
+
+
+NO_STATE = (AttributeError, KeyError)
+"""Exception types that may be raised by instrumentation implementations."""
+
+
+class StaleDataError(sa_exc.SQLAlchemyError):
+ """An operation encountered database state that is unaccounted for.
+
+ Conditions which cause this to happen include:
+
+ * A flush may have attempted to update or delete rows
+ and an unexpected number of rows were matched during
+ the UPDATE or DELETE statement. Note that when
+ version_id_col is used, rows in UPDATE or DELETE statements
+ are also matched against the current known version
+ identifier.
+
+ * A mapped object with version_id_col was refreshed,
+ and the version number coming back from the database does
+ not match that of the object itself.
+
+ * An object was detached from its parent object; however,
+ the object was previously attached to a different parent
+ identity which was garbage collected, and it cannot be
+ determined whether the new parent was really the most
+ recent "parent".
+
+ """
+
+
+ConcurrentModificationError = StaleDataError
+
+
+class FlushError(sa_exc.SQLAlchemyError):
+ """A invalid condition was detected during flush()."""
+
+
+class UnmappedError(sa_exc.InvalidRequestError):
+ """Base for exceptions that involve expected mappings not present."""
+
+
+class ObjectDereferencedError(sa_exc.SQLAlchemyError):
+ """An operation cannot complete due to an object being garbage
+ collected.
+
+ """
+
+
+class DetachedInstanceError(sa_exc.SQLAlchemyError):
+ """An attempt to access unloaded attributes on a
+ mapped instance that is detached."""
+
+ code = "bhk3"
+
+
+class UnmappedInstanceError(UnmappedError):
+ """An mapping operation was requested for an unknown instance."""
+
+ @util.preload_module("sqlalchemy.orm.base")
+ def __init__(self, obj, msg=None):
+ base = util.preloaded.orm_base
+
+ if not msg:
+ try:
+ base.class_mapper(type(obj))
+ name = _safe_cls_name(type(obj))
+ msg = (
+ "Class %r is mapped, but this instance lacks "
+ "instrumentation. This occurs when the instance "
+ "is created before sqlalchemy.orm.mapper(%s) "
+ "was called." % (name, name)
+ )
+ except UnmappedClassError:
+ msg = _default_unmapped(type(obj))
+ if isinstance(obj, type):
+ msg += (
+ "; was a class (%s) supplied where an instance was "
+ "required?" % _safe_cls_name(obj)
+ )
+ UnmappedError.__init__(self, msg)
+
+ def __reduce__(self):
+ return self.__class__, (None, self.args[0])
+
+
+class UnmappedClassError(UnmappedError):
+ """An mapping operation was requested for an unknown class."""
+
+ def __init__(self, cls, msg=None):
+ if not msg:
+ msg = _default_unmapped(cls)
+ UnmappedError.__init__(self, msg)
+
+ def __reduce__(self):
+ return self.__class__, (None, self.args[0])
+
+
+class ObjectDeletedError(sa_exc.InvalidRequestError):
+ """A refresh operation failed to retrieve the database
+ row corresponding to an object's known primary key identity.
+
+ A refresh operation proceeds when an expired attribute is
+ accessed on an object, or when :meth:`_query.Query.get` is
+ used to retrieve an object which is, upon retrieval, detected
+ as expired. A SELECT is emitted for the target row
+ based on primary key; if no row is returned, this
+ exception is raised.
+
+ The true meaning of this exception is simply that
+ no row exists for the primary key identifier associated
+ with a persistent object. The row may have been
+ deleted, or in some cases the primary key updated
+ to a new value, outside of the ORM's management of the target
+ object.
+
+ """
+
+ @util.preload_module("sqlalchemy.orm.base")
+ def __init__(self, state, msg=None):
+ base = util.preloaded.orm_base
+
+ if not msg:
+ msg = (
+ "Instance '%s' has been deleted, or its "
+ "row is otherwise not present." % base.state_str(state)
+ )
+
+ sa_exc.InvalidRequestError.__init__(self, msg)
+
+ def __reduce__(self):
+ return self.__class__, (None, self.args[0])
+
+
+class UnmappedColumnError(sa_exc.InvalidRequestError):
+ """Mapping operation was requested on an unknown column."""
+
+
+class LoaderStrategyException(sa_exc.InvalidRequestError):
+ """A loader strategy for an attribute does not exist."""
+
+ def __init__(
+ self,
+ applied_to_property_type,
+ requesting_property,
+ applies_to,
+ actual_strategy_type,
+ strategy_key,
+ ):
+ if actual_strategy_type is None:
+ sa_exc.InvalidRequestError.__init__(
+ self,
+ "Can't find strategy %s for %s"
+ % (strategy_key, requesting_property),
+ )
+ else:
+ sa_exc.InvalidRequestError.__init__(
+ self,
+ 'Can\'t apply "%s" strategy to property "%s", '
+ 'which is a "%s"; this loader strategy is intended '
+ 'to be used with a "%s".'
+ % (
+ util.clsname_as_plain_name(actual_strategy_type),
+ requesting_property,
+ util.clsname_as_plain_name(applied_to_property_type),
+ util.clsname_as_plain_name(applies_to),
+ ),
+ )
+
+
+def _safe_cls_name(cls):
+ try:
+ cls_name = ".".join((cls.__module__, cls.__name__))
+ except AttributeError:
+ cls_name = getattr(cls, "__name__", None)
+ if cls_name is None:
+ cls_name = repr(cls)
+ return cls_name
+
+
+@util.preload_module("sqlalchemy.orm.base")
+def _default_unmapped(cls):
+ base = util.preloaded.orm_base
+
+ try:
+ mappers = base.manager_of_class(cls).mappers
+ except (TypeError,) + NO_STATE:
+ mappers = {}
+ name = _safe_cls_name(cls)
+
+ if not mappers:
+ return "Class '%s' is not mapped" % name
diff --git a/lib/sqlalchemy/orm/identity.py b/lib/sqlalchemy/orm/identity.py
new file mode 100644
index 0000000..7de8e2c
--- /dev/null
+++ b/lib/sqlalchemy/orm/identity.py
@@ -0,0 +1,254 @@
+# orm/identity.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+import weakref
+
+from . import util as orm_util
+from .. import exc as sa_exc
+from .. import util
+
+
+class IdentityMap(object):
+ def __init__(self):
+ self._dict = {}
+ self._modified = set()
+ self._wr = weakref.ref(self)
+
+ def _kill(self):
+ self._add_unpresent = _killed
+
+ def keys(self):
+ return self._dict.keys()
+
+ def replace(self, state):
+ raise NotImplementedError()
+
+ def add(self, state):
+ raise NotImplementedError()
+
+ def _add_unpresent(self, state, key):
+ """optional inlined form of add() which can assume item isn't present
+ in the map"""
+ self.add(state)
+
+ def update(self, dict_):
+ raise NotImplementedError("IdentityMap uses add() to insert data")
+
+ def clear(self):
+ raise NotImplementedError("IdentityMap uses remove() to remove data")
+
+ def _manage_incoming_state(self, state):
+ state._instance_dict = self._wr
+
+ if state.modified:
+ self._modified.add(state)
+
+ def _manage_removed_state(self, state):
+ del state._instance_dict
+ if state.modified:
+ self._modified.discard(state)
+
+ def _dirty_states(self):
+ return self._modified
+
+ def check_modified(self):
+ """return True if any InstanceStates present have been marked
+ as 'modified'.
+
+ """
+ return bool(self._modified)
+
+ def has_key(self, key):
+ return key in self
+
+ def popitem(self):
+ raise NotImplementedError("IdentityMap uses remove() to remove data")
+
+ def pop(self, key, *args):
+ raise NotImplementedError("IdentityMap uses remove() to remove data")
+
+ def setdefault(self, key, default=None):
+ raise NotImplementedError("IdentityMap uses add() to insert data")
+
+ def __len__(self):
+ return len(self._dict)
+
+ def copy(self):
+ raise NotImplementedError()
+
+ def __setitem__(self, key, value):
+ raise NotImplementedError("IdentityMap uses add() to insert data")
+
+ def __delitem__(self, key):
+ raise NotImplementedError("IdentityMap uses remove() to remove data")
+
+
+class WeakInstanceDict(IdentityMap):
+ def __getitem__(self, key):
+ state = self._dict[key]
+ o = state.obj()
+ if o is None:
+ raise KeyError(key)
+ return o
+
+ def __contains__(self, key):
+ try:
+ if key in self._dict:
+ state = self._dict[key]
+ o = state.obj()
+ else:
+ return False
+ except KeyError:
+ return False
+ else:
+ return o is not None
+
+ def contains_state(self, state):
+ if state.key in self._dict:
+ try:
+ return self._dict[state.key] is state
+ except KeyError:
+ return False
+ else:
+ return False
+
+ def replace(self, state):
+ if state.key in self._dict:
+ try:
+ existing = self._dict[state.key]
+ except KeyError:
+ # gc may have removed the key after we just checked for it;
+ # fall through as though no prior state were present
+ existing = None
+ else:
+ if existing is not state:
+ self._manage_removed_state(existing)
+ else:
+ return None
+ else:
+ existing = None
+
+ self._dict[state.key] = state
+ self._manage_incoming_state(state)
+ return existing
+
+ def add(self, state):
+ key = state.key
+ # inline of self.__contains__
+ if key in self._dict:
+ try:
+ existing_state = self._dict[key]
+ except KeyError:
+ # gc may have removed the key after we just checked for it
+ pass
+ else:
+ if existing_state is not state:
+ o = existing_state.obj()
+ if o is not None:
+ raise sa_exc.InvalidRequestError(
+ "Can't attach instance "
+ "%s; another instance with key %s is already "
+ "present in this session."
+ % (orm_util.state_str(state), state.key)
+ )
+ else:
+ return False
+ self._dict[key] = state
+ self._manage_incoming_state(state)
+ return True
+
+ def _add_unpresent(self, state, key):
+ # inlined form of add() called by loading.py
+ self._dict[key] = state
+ state._instance_dict = self._wr
+
+ def get(self, key, default=None):
+ if key not in self._dict:
+ return default
+ try:
+ state = self._dict[key]
+ except KeyError:
+ # gc may have removed the key after we just checked for it
+ return default
+ else:
+ o = state.obj()
+ if o is None:
+ return default
+ return o
+
+ def items(self):
+ values = self.all_states()
+ result = []
+ for state in values:
+ value = state.obj()
+ if value is not None:
+ result.append((state.key, value))
+ return result
+
+ def values(self):
+ values = self.all_states()
+ result = []
+ for state in values:
+ value = state.obj()
+ if value is not None:
+ result.append(value)
+
+ return result
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ if util.py2k:
+
+ def iteritems(self):
+ return iter(self.items())
+
+ def itervalues(self):
+ return iter(self.values())
+
+ def all_states(self):
+ if util.py2k:
+ return self._dict.values()
+ else:
+ return list(self._dict.values())
+
+ def _fast_discard(self, state):
+ # used by InstanceState for state being
+ # GC'ed, inlines _managed_removed_state
+ try:
+ st = self._dict[state.key]
+ except KeyError:
+ # gc may have removed the key after we just checked for it
+ pass
+ else:
+ if st is state:
+ self._dict.pop(state.key, None)
+
+ def discard(self, state):
+ self.safe_discard(state)
+
+ def safe_discard(self, state):
+ if state.key in self._dict:
+ try:
+ st = self._dict[state.key]
+ except KeyError:
+ # gc may have removed the key after we just checked for it
+ pass
+ else:
+ if st is state:
+ self._dict.pop(state.key, None)
+ self._manage_removed_state(state)
+
+
+def _killed(state, key):
+ # external function to avoid creating cycles when assigned to
+ # the IdentityMap
+ raise sa_exc.InvalidRequestError(
+ "Object %s cannot be converted to 'persistent' state, as this "
+ "identity map is no longer valid. Has the owning Session "
+ "been closed?" % orm_util.state_str(state),
+ code="lkrp",
+ )
diff --git a/lib/sqlalchemy/orm/instrumentation.py b/lib/sqlalchemy/orm/instrumentation.py
new file mode 100644
index 0000000..a7023a2
--- /dev/null
+++ b/lib/sqlalchemy/orm/instrumentation.py
@@ -0,0 +1,652 @@
+# orm/instrumentation.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""Defines SQLAlchemy's system of class instrumentation.
+
+This module is usually not directly visible to user applications, but
+defines a large part of the ORM's interactivity.
+
+instrumentation.py deals with registration of end-user classes
+for state tracking. It interacts closely with state.py
+and attributes.py which establish per-instance and per-class-attribute
+instrumentation, respectively.
+
+The class instrumentation system can be customized on a per-class
+or global basis using the :mod:`sqlalchemy.ext.instrumentation`
+module, which provides the means to build and specify
+alternate instrumentation forms.
+
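+ As a minimal sketch, a class may be associated with a custom manager
+ via the ``__sa_instrumentation_manager__`` attribute, using the
+ extension's :class:`.InstrumentationManager` base::
+
+     from sqlalchemy.ext.instrumentation import InstrumentationManager
+
+     class MyInstrumentationManager(InstrumentationManager):
+         # override hooks here to customize instrumentation
+         pass
+
+     class MyClass(object):
+         __sa_instrumentation_manager__ = MyInstrumentationManager
+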
+ .. versionchanged:: 0.8
+ The instrumentation extension system was moved out of the
+ ORM and into the external :mod:`sqlalchemy.ext.instrumentation`
+ package. When that package is imported, it installs
+ itself within sqlalchemy.orm so that its more comprehensive
+ resolution mechanics take effect.
+
+"""
+
+
+import weakref
+
+from . import base
+from . import collections
+from . import exc
+from . import interfaces
+from . import state
+from .. import util
+from ..util import HasMemoized
+
+
+DEL_ATTR = util.symbol("DEL_ATTR")
+
+
+class ClassManager(HasMemoized, dict):
+ """Tracks state information at the class level."""
+
+ MANAGER_ATTR = base.DEFAULT_MANAGER_ATTR
+ STATE_ATTR = base.DEFAULT_STATE_ATTR
+
+ _state_setter = staticmethod(util.attrsetter(STATE_ATTR))
+
+ expired_attribute_loader = None
+ "previously known as deferred_scalar_loader"
+
+ init_method = None
+
+ factory = None
+ mapper = None
+ declarative_scan = None
+ registry = None
+
+ @property
+ @util.deprecated(
+ "1.4",
+ message="The ClassManager.deferred_scalar_loader attribute is now "
+ "named expired_attribute_loader",
+ )
+ def deferred_scalar_loader(self):
+ return self.expired_attribute_loader
+
+ @deferred_scalar_loader.setter
+ @util.deprecated(
+ "1.4",
+ message="The ClassManager.deferred_scalar_loader attribute is now "
+ "named expired_attribute_loader",
+ )
+ def deferred_scalar_loader(self, obj):
+ self.expired_attribute_loader = obj
+
+ def __init__(self, class_):
+ self.class_ = class_
+ self.info = {}
+ self.new_init = None
+ self.local_attrs = {}
+ self.originals = {}
+ self._finalized = False
+
+ self._bases = [
+ mgr
+ for mgr in [
+ manager_of_class(base)
+ for base in self.class_.__bases__
+ if isinstance(base, type)
+ ]
+ if mgr is not None
+ ]
+
+ for base_ in self._bases:
+ self.update(base_)
+
+ self.dispatch._events._new_classmanager_instance(class_, self)
+
+ for basecls in class_.__mro__:
+ mgr = manager_of_class(basecls)
+ if mgr is not None:
+ self.dispatch._update(mgr.dispatch)
+
+ self.manage()
+
+ if "__del__" in class_.__dict__:
+ util.warn(
+ "__del__() method on class %s will "
+ "cause unreachable cycles and memory leaks, "
+ "as SQLAlchemy instrumentation often creates "
+ "reference cycles. Please remove this method." % class_
+ )
+
+ def _update_state(
+ self,
+ finalize=False,
+ mapper=None,
+ registry=None,
+ declarative_scan=None,
+ expired_attribute_loader=None,
+ init_method=None,
+ ):
+
+ if mapper:
+ self.mapper = mapper
+ if registry:
+ registry._add_manager(self)
+ if declarative_scan:
+ self.declarative_scan = weakref.ref(declarative_scan)
+ if expired_attribute_loader:
+ self.expired_attribute_loader = expired_attribute_loader
+
+ if init_method:
+ assert not self._finalized, (
+ "class is already instrumented, "
+ "init_method %s can't be applied" % init_method
+ )
+ self.init_method = init_method
+
+ if not self._finalized:
+ self.original_init = (
+ self.init_method
+ if self.init_method is not None
+ and self.class_.__init__ is object.__init__
+ else self.class_.__init__
+ )
+
+ if finalize and not self._finalized:
+ self._finalize()
+
+ def _finalize(self):
+ if self._finalized:
+ return
+ self._finalized = True
+
+ self._instrument_init()
+
+ _instrumentation_factory.dispatch.class_instrument(self.class_)
+
+ def __hash__(self):
+ return id(self)
+
+ def __eq__(self, other):
+ return other is self
+
+ @property
+ def is_mapped(self):
+ return "mapper" in self.__dict__
+
+ @HasMemoized.memoized_attribute
+ def _all_key_set(self):
+ return frozenset(self)
+
+ @HasMemoized.memoized_attribute
+ def _collection_impl_keys(self):
+ return frozenset(
+ [attr.key for attr in self.values() if attr.impl.collection]
+ )
+
+ @HasMemoized.memoized_attribute
+ def _scalar_loader_impls(self):
+ return frozenset(
+ [
+ attr.impl
+ for attr in self.values()
+ if attr.impl.accepts_scalar_loader
+ ]
+ )
+
+ @HasMemoized.memoized_attribute
+ def _loader_impls(self):
+ return frozenset([attr.impl for attr in self.values()])
+
+ @util.memoized_property
+ def mapper(self):
+ # raises unless self.mapper has been assigned
+ raise exc.UnmappedClassError(self.class_)
+
+ def _all_sqla_attributes(self, exclude=None):
+ """return an iterator of all classbound attributes that are
+ implement :class:`.InspectionAttr`.
+
+ This includes :class:`.QueryableAttribute` as well as extension
+ types such as :class:`.hybrid_property` and
+ :class:`.AssociationProxy`.
+
+ """
+
+ found = {}
+
+ # constraints:
+ # 1. yield keys in cls.__dict__ order
+ # 2. if a subclass has the same key as a superclass, include that
+ # key as part of the ordering of the superclass, because an
+ # overridden key is usually installed by the mapper which is going
+ # on a different ordering
+ # 3. don't use getattr() as this fires off descriptors
+
+ for supercls in self.class_.__mro__[0:-1]:
+ inherits = supercls.__mro__[1]
+ for key in supercls.__dict__:
+ found.setdefault(key, supercls)
+ if key in inherits.__dict__:
+ continue
+ val = found[key].__dict__[key]
+ if (
+ isinstance(val, interfaces.InspectionAttr)
+ and val.is_attribute
+ ):
+ yield key, val
+
+ def _get_class_attr_mro(self, key, default=None):
+ """return an attribute on the class without tripping it."""
+
+ for supercls in self.class_.__mro__:
+ if key in supercls.__dict__:
+ return supercls.__dict__[key]
+ else:
+ return default
+
+ def _attr_has_impl(self, key):
+ """Return True if the given attribute is fully initialized.
+
+ i.e. has an impl.
+ """
+
+ return key in self and self[key].impl is not None
+
+ def _subclass_manager(self, cls):
+ """Create a new ClassManager for a subclass of this ClassManager's
+ class.
+
+ This is called automatically when attributes are instrumented so that
+ the attributes can be propagated to subclasses against their own
+ class-local manager, without the need for mappers etc. to have already
+ pre-configured managers for the full class hierarchy. Mappers
+ can post-configure the auto-generated ClassManager when needed.
+
+ """
+ return register_class(cls, finalize=False)
+
+ def _instrument_init(self):
+ self.new_init = _generate_init(self.class_, self, self.original_init)
+ self.install_member("__init__", self.new_init)
+
+ @util.memoized_property
+ def _state_constructor(self):
+ self.dispatch.first_init(self, self.class_)
+ return state.InstanceState
+
+ def manage(self):
+ """Mark this instance as the manager for its class."""
+
+ setattr(self.class_, self.MANAGER_ATTR, self)
+
+ @util.hybridmethod
+ def manager_getter(self):
+ return _default_manager_getter
+
+ @util.hybridmethod
+ def state_getter(self):
+ """Return a (instance) -> InstanceState callable.
+
+ "state getter" callables should raise either KeyError or
+ AttributeError if no InstanceState could be found for the
+ instance.
+ """
+
+ return _default_state_getter
+
+ @util.hybridmethod
+ def dict_getter(self):
+ return _default_dict_getter
+
+ def instrument_attribute(self, key, inst, propagated=False):
+ if propagated:
+ if key in self.local_attrs:
+ return # don't override local attr with inherited attr
+ else:
+ self.local_attrs[key] = inst
+ self.install_descriptor(key, inst)
+ self._reset_memoizations()
+ self[key] = inst
+
+ for cls in self.class_.__subclasses__():
+ manager = self._subclass_manager(cls)
+ manager.instrument_attribute(key, inst, True)
+
+ def subclass_managers(self, recursive):
+ for cls in self.class_.__subclasses__():
+ mgr = manager_of_class(cls)
+ if mgr is not None and mgr is not self:
+ yield mgr
+ if recursive:
+ for m in mgr.subclass_managers(True):
+ yield m
+
+ def post_configure_attribute(self, key):
+ _instrumentation_factory.dispatch.attribute_instrument(
+ self.class_, key, self[key]
+ )
+
+ def uninstrument_attribute(self, key, propagated=False):
+ if key not in self:
+ return
+ if propagated:
+ if key in self.local_attrs:
+ return # don't get rid of local attr
+ else:
+ del self.local_attrs[key]
+ self.uninstall_descriptor(key)
+ self._reset_memoizations()
+ del self[key]
+ for cls in self.class_.__subclasses__():
+ manager = manager_of_class(cls)
+ if manager:
+ manager.uninstrument_attribute(key, True)
+
+ def unregister(self):
+ """remove all instrumentation established by this ClassManager."""
+
+ for key in list(self.originals):
+ self.uninstall_member(key)
+
+ self.mapper = self.dispatch = self.new_init = None
+ self.info.clear()
+
+ for key in list(self):
+ if key in self.local_attrs:
+ self.uninstrument_attribute(key)
+
+ if self.MANAGER_ATTR in self.class_.__dict__:
+ delattr(self.class_, self.MANAGER_ATTR)
+
+ def install_descriptor(self, key, inst):
+ if key in (self.STATE_ATTR, self.MANAGER_ATTR):
+ raise KeyError(
+ "%r: requested attribute name conflicts with "
+ "instrumentation attribute of the same name." % key
+ )
+ setattr(self.class_, key, inst)
+
+ def uninstall_descriptor(self, key):
+ delattr(self.class_, key)
+
+ def install_member(self, key, implementation):
+ if key in (self.STATE_ATTR, self.MANAGER_ATTR):
+ raise KeyError(
+ "%r: requested attribute name conflicts with "
+ "instrumentation attribute of the same name." % key
+ )
+ self.originals.setdefault(key, self.class_.__dict__.get(key, DEL_ATTR))
+ setattr(self.class_, key, implementation)
+
+ def uninstall_member(self, key):
+ original = self.originals.pop(key, None)
+ if original is not DEL_ATTR:
+ setattr(self.class_, key, original)
+ else:
+ delattr(self.class_, key)
+
+ def instrument_collection_class(self, key, collection_class):
+ return collections.prepare_instrumentation(collection_class)
+
+ def initialize_collection(self, key, state, factory):
+ user_data = factory()
+ adapter = collections.CollectionAdapter(
+ self.get_impl(key), state, user_data
+ )
+ return adapter, user_data
+
+ def is_instrumented(self, key, search=False):
+ if search:
+ return key in self
+ else:
+ return key in self.local_attrs
+
+ def get_impl(self, key):
+ return self[key].impl
+
+ @property
+ def attributes(self):
+ return iter(self.values())
+
+ # InstanceState management
+
+ def new_instance(self, state=None):
+ instance = self.class_.__new__(self.class_)
+ if state is None:
+ state = self._state_constructor(instance, self)
+ self._state_setter(instance, state)
+ return instance
+
+ def setup_instance(self, instance, state=None):
+ if state is None:
+ state = self._state_constructor(instance, self)
+ self._state_setter(instance, state)
+
+ def teardown_instance(self, instance):
+ delattr(instance, self.STATE_ATTR)
+
+ def _serialize(self, state, state_dict):
+ return _SerializeManager(state, state_dict)
+
+ def _new_state_if_none(self, instance):
+ """Install a default InstanceState if none is present.
+
+ A private convenience method used by the __init__ decorator.
+
+ """
+ if hasattr(instance, self.STATE_ATTR):
+ return False
+ elif self.class_ is not instance.__class__ and self.is_mapped:
+ # this will create a new ClassManager for the
+ # subclass, without a mapper. This is likely a
+ # user error situation but allow the object
+ # to be constructed, so that it is usable
+ # in a non-ORM context at least.
+ return self._subclass_manager(
+ instance.__class__
+ )._new_state_if_none(instance)
+ else:
+ state = self._state_constructor(instance, self)
+ self._state_setter(instance, state)
+ return state
+
+ def has_state(self, instance):
+ return hasattr(instance, self.STATE_ATTR)
+
+ def has_parent(self, state, key, optimistic=False):
+ """TODO"""
+ return self.get_impl(key).hasparent(state, optimistic=optimistic)
+
+ def __bool__(self):
+ """All ClassManagers are non-zero regardless of attribute state."""
+ return True
+
+ __nonzero__ = __bool__
+
+ def __repr__(self):
+ return "<%s of %r at %x>" % (
+ self.__class__.__name__,
+ self.class_,
+ id(self),
+ )
+
+
+class _SerializeManager(object):
+ """Provide serialization of a :class:`.ClassManager`.
+
+ The :class:`.InstanceState` uses ``__init__()`` on serialize
+ and ``__call__()`` on deserialize.
+
+ """
+
+ def __init__(self, state, d):
+ self.class_ = state.class_
+ manager = state.manager
+ manager.dispatch.pickle(state, d)
+
+ def __call__(self, state, inst, state_dict):
+ state.manager = manager = manager_of_class(self.class_)
+ if manager is None:
+ raise exc.UnmappedInstanceError(
+ inst,
+ "Cannot deserialize object of type %r - "
+ "no mapper() has "
+ "been configured for this class within the current "
+ "Python process!" % self.class_,
+ )
+ elif manager.is_mapped and not manager.mapper.configured:
+ manager.mapper._check_configure()
+
+ # setup _sa_instance_state ahead of time so that
+ # unpickle events can access the object normally.
+ # see [ticket:2362]
+ if inst is not None:
+ manager.setup_instance(inst, state)
+ manager.dispatch.unpickle(state, state_dict)
+
+
+class InstrumentationFactory(object):
+ """Factory for new ClassManager instances."""
+
+ def create_manager_for_cls(self, class_):
+ assert class_ is not None
+ assert manager_of_class(class_) is None
+
+ # give a more complicated subclass
+ # a chance to do what it wants here
+ manager, factory = self._locate_extended_factory(class_)
+
+ if factory is None:
+ factory = ClassManager
+ manager = factory(class_)
+
+ self._check_conflicts(class_, factory)
+
+ manager.factory = factory
+
+ return manager
+
+ def _locate_extended_factory(self, class_):
+ """Overridden by a subclass to do an extended lookup."""
+ return None, None
+
+ def _check_conflicts(self, class_, factory):
+ """Overridden by a subclass to test for conflicting factories."""
+ return
+
+ def unregister(self, class_):
+ manager = manager_of_class(class_)
+ manager.unregister()
+ self.dispatch.class_uninstrument(class_)
+
+
+# this attribute is replaced by sqlalchemy.ext.instrumentation
+# when imported.
+_instrumentation_factory = InstrumentationFactory()
+
+# these attributes are replaced by sqlalchemy.ext.instrumentation
+# when a non-standard InstrumentationManager class is first
+# used to instrument a class.
+instance_state = _default_state_getter = base.instance_state
+
+instance_dict = _default_dict_getter = base.instance_dict
+
+manager_of_class = _default_manager_getter = base.manager_of_class
+
+
+def register_class(
+ class_,
+ finalize=True,
+ mapper=None,
+ registry=None,
+ declarative_scan=None,
+ expired_attribute_loader=None,
+ init_method=None,
+):
+ """Register class instrumentation.
+
+ Returns the existing or newly created class manager.
+
+ """
+
+ manager = manager_of_class(class_)
+ if manager is None:
+ manager = _instrumentation_factory.create_manager_for_cls(class_)
+ manager._update_state(
+ mapper=mapper,
+ registry=registry,
+ declarative_scan=declarative_scan,
+ expired_attribute_loader=expired_attribute_loader,
+ init_method=init_method,
+ finalize=finalize,
+ )
+
+ return manager
+
+
+def unregister_class(class_):
+ """Unregister class instrumentation."""
+
+ _instrumentation_factory.unregister(class_)
+
+
+def is_instrumented(instance, key):
+ """Return True if the given attribute on the given instance is
+ instrumented by the attributes package.
+
+ This function may be used regardless of instrumentation
+ applied directly to the class, i.e. no descriptors are required.
+
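+ A sketch of usage, assuming ``u1`` is an instance of a mapped class
+ with a mapped ``name`` attribute::
+
+     from sqlalchemy.orm.instrumentation import is_instrumented
+
+     assert is_instrumented(u1, "name")
+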
+ """
+ return manager_of_class(instance.__class__).is_instrumented(
+ key, search=True
+ )
+
+
+def _generate_init(class_, class_manager, original_init):
+ """Build an __init__ decorator that triggers ClassManager events."""
+
+ # TODO: we should use the ClassManager's notion of the
+ # original '__init__' method, once ClassManager is fixed
+ # to always reference that.
+
+ if original_init is None:
+ original_init = class_.__init__
+
+ # Go through some effort here and don't change the user's __init__
+ # calling signature, including the unlikely case that it has
+ # a return value.
+ # FIXME: need to juggle local names to avoid constructor argument
+ # clashes.
+ func_body = """\
+def __init__(%(apply_pos)s):
+ new_state = class_manager._new_state_if_none(%(self_arg)s)
+ if new_state:
+ return new_state._initialize_instance(%(apply_kw)s)
+ else:
+ return original_init(%(apply_kw)s)
+"""
+ func_vars = util.format_argspec_init(original_init, grouped=False)
+ func_text = func_body % func_vars
+
+ if util.py2k:
+ func = getattr(original_init, "im_func", original_init)
+ func_defaults = getattr(func, "func_defaults", None)
+ else:
+ func_defaults = getattr(original_init, "__defaults__", None)
+ func_kw_defaults = getattr(original_init, "__kwdefaults__", None)
+
+ env = locals().copy()
+ env["__name__"] = __name__
+ exec(func_text, env)
+ __init__ = env["__init__"]
+ __init__.__doc__ = original_init.__doc__
+ __init__._sa_original_init = original_init
+
+ if func_defaults:
+ __init__.__defaults__ = func_defaults
+ if not util.py2k and func_kw_defaults:
+ __init__.__kwdefaults__ = func_kw_defaults
+
+ return __init__
diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py
new file mode 100644
index 0000000..63295d0
--- /dev/null
+++ b/lib/sqlalchemy/orm/interfaces.py
@@ -0,0 +1,978 @@
+# orm/interfaces.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""
+
+Contains various base classes used throughout the ORM.
+
+Defines some key base classes prominent within the internals.
+
+This module and the classes within are mostly private, though some attributes
+are exposed when inspecting mappings.
+
+"""
+
+from __future__ import absolute_import
+
+import collections
+
+from . import exc as orm_exc
+from . import path_registry
+from .base import _MappedAttribute # noqa
+from .base import EXT_CONTINUE
+from .base import EXT_SKIP
+from .base import EXT_STOP
+from .base import InspectionAttr # noqa
+from .base import InspectionAttrInfo # noqa
+from .base import MANYTOMANY
+from .base import MANYTOONE
+from .base import NOT_EXTENSION
+from .base import ONETOMANY
+from .. import inspect
+from .. import inspection
+from .. import util
+from ..sql import operators
+from ..sql import roles
+from ..sql import visitors
+from ..sql.base import ExecutableOption
+from ..sql.traversals import HasCacheKey
+
+
+__all__ = (
+ "EXT_CONTINUE",
+ "EXT_STOP",
+ "EXT_SKIP",
+ "ONETOMANY",
+ "MANYTOMANY",
+ "MANYTOONE",
+ "NOT_EXTENSION",
+ "LoaderStrategy",
+ "MapperOption",
+ "LoaderOption",
+ "MapperProperty",
+ "PropComparator",
+ "StrategizedProperty",
+)
+
+
+class ORMStatementRole(roles.StatementRole):
+ _role_name = (
+ "Executable SQL or text() construct, including ORM " "aware objects"
+ )
+
+
+class ORMColumnsClauseRole(roles.ColumnsClauseRole):
+ _role_name = "ORM mapped entity, aliased entity, or Column expression"
+
+
+class ORMEntityColumnsClauseRole(ORMColumnsClauseRole):
+ _role_name = "ORM mapped or aliased entity"
+
+
+class ORMFromClauseRole(roles.StrictFromClauseRole):
+ _role_name = "ORM mapped entity, aliased entity, or FROM expression"
+
+
+@inspection._self_inspects
+class MapperProperty(
+ HasCacheKey, _MappedAttribute, InspectionAttr, util.MemoizedSlots
+):
+ """Represent a particular class attribute mapped by :class:`_orm.Mapper`.
+
+ The most common occurrences of :class:`.MapperProperty` are the
+ mapped :class:`_schema.Column`, which is represented in a mapping as
+ an instance of :class:`.ColumnProperty`,
+ and a reference to another class produced by :func:`_orm.relationship`,
+ represented in the mapping as an instance of
+ :class:`.RelationshipProperty`.
+
+ """
+
+ __slots__ = (
+ "_configure_started",
+ "_configure_finished",
+ "parent",
+ "key",
+ "info",
+ )
+
+ _cache_key_traversal = [
+ ("parent", visitors.ExtendedInternalTraversal.dp_has_cache_key),
+ ("key", visitors.ExtendedInternalTraversal.dp_string),
+ ]
+
+ cascade = frozenset()
+ """The set of 'cascade' attribute names.
+
+ This collection is checked before the 'cascade_iterator' method is called.
+
+ The collection typically only applies to a RelationshipProperty.
+
+ """
+
+ is_property = True
+ """Part of the InspectionAttr interface; states this object is a
+ mapper property.
+
+ """
+
+ @property
+ def _links_to_entity(self):
+ """True if this MapperProperty refers to a mapped entity.
+
+ Should only be True for RelationshipProperty, False for all others.
+
+ """
+ raise NotImplementedError()
+
+ def _memoized_attr_info(self):
+ """Info dictionary associated with the object, allowing user-defined
+ data to be associated with this :class:`.InspectionAttr`.
+
+ The dictionary is generated when first accessed. Alternatively,
+ it can be specified as a constructor argument to the
+ :func:`.column_property`, :func:`_orm.relationship`, or
+ :func:`.composite`
+ functions.
+
+ .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
+ available on extension types via the
+ :attr:`.InspectionAttrInfo.info` attribute, so that it can apply
+ to a wider variety of ORM and extension constructs.
+
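+ A sketch of supplying info at mapping time, assuming a hypothetical
+ ``name`` column::
+
+     from sqlalchemy import Column, String
+     from sqlalchemy.orm import column_property
+
+     name = column_property(
+         Column("name", String), info={"verbose_name": "Full Name"}
+     )
+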
+ .. seealso::
+
+ :attr:`.QueryableAttribute.info`
+
+ :attr:`.SchemaItem.info`
+
+ """
+ return {}
+
+ def setup(self, context, query_entity, path, adapter, **kwargs):
+ """Called by Query for the purposes of constructing a SQL statement.
+
+ Each MapperProperty associated with the target mapper processes the
+ statement referenced by the query context, adding columns and/or
+ criterion as appropriate.
+
+ """
+
+ def create_row_processor(
+ self, context, query_entity, path, mapper, result, adapter, populators
+ ):
+ """Produce row processing functions and append to the given
+ set of populators lists.
+
+ """
+
+ def cascade_iterator(
+ self, type_, state, dict_, visited_states, halt_on=None
+ ):
+ """Iterate through instances related to the given instance for
+ a particular 'cascade', starting with this MapperProperty.
+
+ Return an iterator of 3-tuples ``(instance, mapper, state)``.
+
+ Note that the 'cascade' collection on this MapperProperty is
+ checked first for the given type before cascade_iterator is called.
+
+ This method typically only applies to RelationshipProperty.
+
+ """
+
+ return iter(())
+
+ def set_parent(self, parent, init):
+ """Set the parent mapper that references this MapperProperty.
+
+ This method is overridden by some subclasses to perform extra
+ setup when the mapper is first known.
+
+ """
+ self.parent = parent
+
+ def instrument_class(self, mapper):
+ """Hook called by the Mapper to the property to initiate
+ instrumentation of the class attribute managed by this
+ MapperProperty.
+
+ The MapperProperty here will typically call out to the
+ attributes module to set up an InstrumentedAttribute.
+
+ This step is the first of two steps to set up an InstrumentedAttribute,
+ and is called early in the mapper setup process.
+
+ The second step is typically the init_class_attribute step,
+ called from StrategizedProperty via the post_instrument_class()
+ hook. This step assigns additional state to the InstrumentedAttribute
+ (specifically the "impl") which has been determined after the
+ MapperProperty has determined what kind of persistence
+ management it needs to do (e.g. scalar, object, collection, etc).
+
+ """
+
+ def __init__(self):
+ self._configure_started = False
+ self._configure_finished = False
+
+ def init(self):
+ """Called after all mappers are created to assemble
+ relationships between mappers and perform other post-mapper-creation
+ initialization steps.
+
+
+ """
+ self._configure_started = True
+ self.do_init()
+ self._configure_finished = True
+
+ @property
+ def class_attribute(self):
+ """Return the class-bound descriptor corresponding to this
+ :class:`.MapperProperty`.
+
+ This is basically a ``getattr()`` call::
+
+ return getattr(self.parent.class_, self.key)
+
+ I.e. if this :class:`.MapperProperty` were named ``addresses``,
+ and the class to which it is mapped is ``User``, this sequence
+ is possible::
+
+ >>> from sqlalchemy import inspect
+ >>> mapper = inspect(User)
+ >>> addresses_property = mapper.attrs.addresses
+ >>> addresses_property.class_attribute is User.addresses
+ True
+ >>> User.addresses.property is addresses_property
+ True
+
+
+ """
+
+ return getattr(self.parent.class_, self.key)
+
+ def do_init(self):
+ """Perform subclass-specific initialization post-mapper-creation
+ steps.
+
+ This is a template method called by the ``MapperProperty``
+ object's init() method.
+
+ """
+
+ def post_instrument_class(self, mapper):
+ """Perform instrumentation adjustments that need to occur
+ after init() has completed.
+
+ The given Mapper is the Mapper invoking the operation, which
+ may not be the same Mapper as self.parent in an inheritance
+ scenario; however, Mapper will always at least be a sub-mapper of
+ self.parent.
+
+ This method is typically used by StrategizedProperty, which delegates
+ it to LoaderStrategy.init_class_attribute() to perform final setup
+ on the class-bound InstrumentedAttribute.
+
+ """
+
+ def merge(
+ self,
+ session,
+ source_state,
+ source_dict,
+ dest_state,
+ dest_dict,
+ load,
+ _recursive,
+ _resolve_conflict_map,
+ ):
+ """Merge the attribute represented by this ``MapperProperty``
+ from source to destination object.
+
+ """
+
+ def __repr__(self):
+ return "<%s at 0x%x; %s>" % (
+ self.__class__.__name__,
+ id(self),
+ getattr(self, "key", "no key"),
+ )
+
+
+@inspection._self_inspects
+class PropComparator(operators.ColumnOperators):
+ r"""Defines SQL operators for :class:`.MapperProperty` objects.
+
+ SQLAlchemy allows for operators to
+ be redefined at both the Core and ORM level. :class:`.PropComparator`
+ is the base class of operator redefinition for ORM-level operations,
+ including those of :class:`.ColumnProperty`,
+ :class:`.RelationshipProperty`, and :class:`.CompositeProperty`.
+
+ .. note:: With the advent of Hybrid properties introduced in SQLAlchemy
+ 0.7, as well as Core-level operator redefinition in
+ SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator`
+ instances is extremely rare. See :ref:`hybrids_toplevel` as well
+ as :ref:`types_operators`.
+
+ User-defined subclasses of :class:`.PropComparator` may be created. The
+ built-in Python comparison and math operator methods, such as
+ :meth:`.operators.ColumnOperators.__eq__`,
+ :meth:`.operators.ColumnOperators.__lt__`, and
+ :meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
+ new operator behavior. The custom :class:`.PropComparator` is passed to
+ the :class:`.MapperProperty` instance via the ``comparator_factory``
+ argument. In each case,
+ the appropriate subclass of :class:`.PropComparator` should be used::
+
+ # definition of custom PropComparator subclasses
+
+ from sqlalchemy.orm.properties import \
+ ColumnProperty,\
+ CompositeProperty,\
+ RelationshipProperty
+
+ class MyColumnComparator(ColumnProperty.Comparator):
+ def __eq__(self, other):
+ return self.__clause_element__() == other
+
+ class MyRelationshipComparator(RelationshipProperty.Comparator):
+ def any(self, expression):
+ "define the 'any' operation"
+ # ...
+
+ class MyCompositeComparator(CompositeProperty.Comparator):
+ def __gt__(self, other):
+ "redefine the 'greater than' operation"
+
+ return sql.and_(*[a>b for a, b in
+ zip(self.__clause_element__().clauses,
+ other.__composite_values__())])
+
+
+ # application of custom PropComparator subclasses
+
+ from sqlalchemy.orm import column_property, relationship, composite
+ from sqlalchemy import Column, String
+
+ class SomeMappedClass(Base):
+ some_column = column_property(Column("some_column", String),
+ comparator_factory=MyColumnComparator)
+
+ some_relationship = relationship(SomeOtherClass,
+ comparator_factory=MyRelationshipComparator)
+
+ some_composite = composite(
+ Column("a", String), Column("b", String),
+ comparator_factory=MyCompositeComparator
+ )
+
+ Note that for column-level operator redefinition, it's usually
+ simpler to define the operators at the Core level, using the
+ :attr:`.TypeEngine.comparator_factory` attribute. See
+ :ref:`types_operators` for more detail.
+
+ .. seealso::
+
+ :class:`.ColumnProperty.Comparator`
+
+ :class:`.RelationshipProperty.Comparator`
+
+ :class:`.CompositeProperty.Comparator`
+
+ :class:`.ColumnOperators`
+
+ :ref:`types_operators`
+
+ :attr:`.TypeEngine.comparator_factory`
+
+ """
+
+ __slots__ = "prop", "property", "_parententity", "_adapt_to_entity"
+
+ __visit_name__ = "orm_prop_comparator"
+
+ def __init__(
+ self,
+ prop,
+ parentmapper,
+ adapt_to_entity=None,
+ ):
+ self.prop = self.property = prop
+ self._parententity = adapt_to_entity or parentmapper
+ self._adapt_to_entity = adapt_to_entity
+
+ def __clause_element__(self):
+ raise NotImplementedError("%r" % self)
+
+ def _bulk_update_tuples(self, value):
+ """Receive a SQL expression that represents a value in the SET
+ clause of an UPDATE statement.
+
+ Return a tuple that can be passed to a :class:`_expression.Update`
+ construct.
+
+ """
+
+ return [(self.__clause_element__(), value)]
+
+ def adapt_to_entity(self, adapt_to_entity):
+ """Return a copy of this PropComparator which will use the given
+ :class:`.AliasedInsp` to produce corresponding expressions.
+ """
+ return self.__class__(self.prop, self._parententity, adapt_to_entity)
+
+ @property
+ def _parentmapper(self):
+ """legacy; this is renamed to _parententity to be
+ compatible with QueryableAttribute."""
+ return inspect(self._parententity).mapper
+
+ @property
+ def _propagate_attrs(self):
+ # this suits the case in coercions where we don't actually
+ # call ``__clause_element__()`` but still need to get
+ # resolved._propagate_attrs. See #6558.
+ return util.immutabledict(
+ {
+ "compile_state_plugin": "orm",
+ "plugin_subject": self._parentmapper,
+ }
+ )
+
+ @property
+ def adapter(self):
+ """Produce a callable that adapts column expressions
+ to suit an aliased version of this comparator.
+
+ """
+ if self._adapt_to_entity is None:
+ return None
+ else:
+ return self._adapt_to_entity._adapt_element
+
+ @property
+ def info(self):
+ return self.property.info
+
+ @staticmethod
+ def any_op(a, b, **kwargs):
+ return a.any(b, **kwargs)
+
+ @staticmethod
+ def has_op(a, b, **kwargs):
+ return a.has(b, **kwargs)
+
+ @staticmethod
+ def of_type_op(a, class_):
+ return a.of_type(class_)
+
+ def of_type(self, class_):
+ r"""Redefine this object in terms of a polymorphic subclass,
+ :func:`_orm.with_polymorphic` construct, or :func:`_orm.aliased`
+ construct.
+
+ Returns a new PropComparator from which further criterion can be
+ evaluated.
+
+ e.g.::
+
+ query.join(Company.employees.of_type(Engineer)).\
+ filter(Engineer.name=='foo')
+
+        :param \class_: a class or mapper indicating that criterion will be
+            evaluated against this specific subclass.
+
+ .. seealso::
+
+ :ref:`queryguide_join_onclause` - in the :ref:`queryguide_toplevel`
+
+ :ref:`inheritance_of_type`
+
+ """
+
+ return self.operate(PropComparator.of_type_op, class_)
+
+ def and_(self, *criteria):
+ """Add additional criteria to the ON clause that's represented by this
+ relationship attribute.
+
+ E.g.::
+
+ stmt = select(User).join(
+ User.addresses.and_(Address.email_address != 'foo')
+ )
+
+ stmt = select(User).options(
+ joinedload(User.addresses.and_(Address.email_address != 'foo'))
+ )
+
+ .. versionadded:: 1.4
+
+ .. seealso::
+
+ :ref:`orm_queryguide_join_on_augmented`
+
+ :ref:`loader_option_criteria`
+
+ :func:`.with_loader_criteria`
+
+ """
+ return self.operate(operators.and_, *criteria)
+
+ def any(self, criterion=None, **kwargs):
+ r"""Return true if this collection contains any member that meets the
+ given criterion.
+
+ The usual implementation of ``any()`` is
+ :meth:`.RelationshipProperty.Comparator.any`.
+
+ :param criterion: an optional ClauseElement formulated against the
+ member class' table or attributes.
+
+ :param \**kwargs: key/value pairs corresponding to member class
+ attribute names which will be compared via equality to the
+ corresponding values.
+
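+        E.g., assuming hypothetical ``User`` / ``Address`` mappings
+        (names are illustrative only)::
+
+            # select User objects having at least one related Address
+            # at the given domain
+            stmt = select(User).where(
+                User.addresses.any(
+                    Address.email_address.endswith("@example.com")
+                )
+            )
+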
+ """
+
+ return self.operate(PropComparator.any_op, criterion, **kwargs)
+
+ def has(self, criterion=None, **kwargs):
+ r"""Return true if this element references a member which meets the
+ given criterion.
+
+ The usual implementation of ``has()`` is
+ :meth:`.RelationshipProperty.Comparator.has`.
+
+ :param criterion: an optional ClauseElement formulated against the
+ member class' table or attributes.
+
+ :param \**kwargs: key/value pairs corresponding to member class
+ attribute names which will be compared via equality to the
+ corresponding values.
+
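+        E.g., assuming hypothetical ``Address`` / ``User`` mappings
+        (names are illustrative only)::
+
+            # select Address objects whose related User is named 'ed'
+            stmt = select(Address).where(Address.user.has(User.name == "ed"))
+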
+ """
+
+ return self.operate(PropComparator.has_op, criterion, **kwargs)
+
+
+class StrategizedProperty(MapperProperty):
+ """A MapperProperty which uses selectable strategies to affect
+ loading behavior.
+
+    There is a single strategy selected by default.  Alternate
+    strategies can be selected at Query time through the usage of
+    loader options via the Query.options() method.
+
+ The mechanics of StrategizedProperty are used for every Query
+ invocation for every mapped attribute participating in that Query,
+ to determine first how the attribute will be rendered in SQL
+ and secondly how the attribute will retrieve a value from a result
+ row and apply it to a mapped object. The routines here are very
+ performance-critical.
+
+ """
+
+ __slots__ = (
+ "_strategies",
+ "strategy",
+ "_wildcard_token",
+ "_default_path_loader_key",
+ )
+ inherit_cache = True
+ strategy_wildcard_key = None
+
+ def _memoized_attr__wildcard_token(self):
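+        # e.g. for a relationship-oriented property this renders as
+        # something like ("relationship:*",), assuming a
+        # strategy_wildcard_key of "relationship"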
+ return (
+ "%s:%s"
+ % (self.strategy_wildcard_key, path_registry._WILDCARD_TOKEN),
+ )
+
+ def _memoized_attr__default_path_loader_key(self):
+ return (
+ "loader",
+ (
+ "%s:%s"
+ % (self.strategy_wildcard_key, path_registry._DEFAULT_TOKEN),
+ ),
+ )
+
+ def _get_context_loader(self, context, path):
+ load = None
+
+ search_path = path[self]
+
+ # search among: exact match, "attr.*", "default" strategy
+ # if any.
+ for path_key in (
+ search_path._loader_key,
+ search_path._wildcard_path_loader_key,
+ search_path._default_path_loader_key,
+ ):
+ if path_key in context.attributes:
+ load = context.attributes[path_key]
+ break
+
+ return load
+
+ def _get_strategy(self, key):
+ try:
+ return self._strategies[key]
+ except KeyError:
+ pass
+
+ # run outside to prevent transfer of exception context
+ cls = self._strategy_lookup(self, *key)
+        # this previously was setting self._strategies[cls], which is
+        # a bad idea; the strategy key should be used at all times,
+        # because every strategy has multiple keys at this point
+ self._strategies[key] = strategy = cls(self, key)
+ return strategy
+
+ def setup(self, context, query_entity, path, adapter, **kwargs):
+ loader = self._get_context_loader(context, path)
+ if loader and loader.strategy:
+ strat = self._get_strategy(loader.strategy)
+ else:
+ strat = self.strategy
+ strat.setup_query(
+ context, query_entity, path, loader, adapter, **kwargs
+ )
+
+ def create_row_processor(
+ self, context, query_entity, path, mapper, result, adapter, populators
+ ):
+ loader = self._get_context_loader(context, path)
+ if loader and loader.strategy:
+ strat = self._get_strategy(loader.strategy)
+ else:
+ strat = self.strategy
+ strat.create_row_processor(
+ context,
+ query_entity,
+ path,
+ loader,
+ mapper,
+ result,
+ adapter,
+ populators,
+ )
+
+ def do_init(self):
+ self._strategies = {}
+ self.strategy = self._get_strategy(self.strategy_key)
+
+ def post_instrument_class(self, mapper):
+ if (
+ not self.parent.non_primary
+ and not mapper.class_manager._attr_has_impl(self.key)
+ ):
+ self.strategy.init_class_attribute(mapper)
+
+ _all_strategies = collections.defaultdict(dict)
+
+ @classmethod
+ def strategy_for(cls, **kw):
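+        # registers a loader strategy class under the given keyword
+        # arguments; used as a decorator, e.g. (illustrative)::
+        #
+        #     @RelationshipProperty.strategy_for(lazy="select")
+        #     class LazyLoader(AbstractRelationshipLoader):
+        #         ...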
+ def decorate(dec_cls):
+ # ensure each subclass of the strategy has its
+ # own _strategy_keys collection
+ if "_strategy_keys" not in dec_cls.__dict__:
+ dec_cls._strategy_keys = []
+ key = tuple(sorted(kw.items()))
+ cls._all_strategies[cls][key] = dec_cls
+ dec_cls._strategy_keys.append(key)
+ return dec_cls
+
+ return decorate
+
+ @classmethod
+ def _strategy_lookup(cls, requesting_property, *key):
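+        # the bare attribute access below is apparently for its side
+        # effect; it ensures the parent mapper's polymorphic
+        # configuration is resolved before strategies are looked up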
+ requesting_property.parent._with_polymorphic_mappers
+
+ for prop_cls in cls.__mro__:
+ if prop_cls in cls._all_strategies:
+ strategies = cls._all_strategies[prop_cls]
+ try:
+ return strategies[key]
+ except KeyError:
+ pass
+
+ for property_type, strats in cls._all_strategies.items():
+ if key in strats:
+ intended_property_type = property_type
+ actual_strategy = strats[key]
+ break
+ else:
+ intended_property_type = None
+ actual_strategy = None
+
+ raise orm_exc.LoaderStrategyException(
+ cls,
+ requesting_property,
+ intended_property_type,
+ actual_strategy,
+ key,
+ )
+
+
+class ORMOption(ExecutableOption):
+ """Base class for option objects that are passed to ORM queries.
+
+ These options may be consumed by :meth:`.Query.options`,
+ :meth:`.Select.options`, or in a more general sense by any
+ :meth:`.Executable.options` method. They are interpreted at
+ statement compile time or execution time in modern use. The
+ deprecated :class:`.MapperOption` is consumed at ORM query construction
+ time.
+
+ .. versionadded:: 1.4
+
+ """
+
+ __slots__ = ()
+
+ _is_legacy_option = False
+
+ propagate_to_loaders = False
+ """if True, indicate this option should be carried along
+ to "secondary" SELECT statements that occur for relationship
+ lazy loaders as well as attribute load / refresh operations.
+
+ """
+
+ _is_compile_state = False
+
+ _is_criteria_option = False
+
+ _is_strategy_option = False
+
+
+class CompileStateOption(HasCacheKey, ORMOption):
+ """base for :class:`.ORMOption` classes that affect the compilation of
+ a SQL query and therefore need to be part of the cache key.
+
+ .. note:: :class:`.CompileStateOption` is generally non-public and
+ should not be used as a base class for user-defined options; instead,
+ use :class:`.UserDefinedOption`, which is easier to use as it does not
+ interact with ORM compilation internals or caching.
+
+ :class:`.CompileStateOption` defines an internal attribute
+    ``_is_compile_state=True``, which has the effect that the ORM compilation
+    routines for SELECT and other statements will call upon these options when
+ a SQL string is being compiled. As such, these classes implement
+ :class:`.HasCacheKey` and need to provide robust ``_cache_key_traversal``
+ structures.
+
+ The :class:`.CompileStateOption` class is used to implement the ORM
+ :class:`.LoaderOption` and :class:`.CriteriaOption` classes.
+
+ .. versionadded:: 1.4.28
+
+ """
+
+ _is_compile_state = True
+
+ def process_compile_state(self, compile_state):
+ """Apply a modification to a given :class:`.CompileState`."""
+
+ def process_compile_state_replaced_entities(
+ self, compile_state, mapper_entities
+ ):
+ """Apply a modification to a given :class:`.CompileState`,
+ given entities that were replaced by with_only_columns() or
+ with_entities().
+
+ .. versionadded:: 1.4.19
+
+ """
+
+
+class LoaderOption(CompileStateOption):
+ """Describe a loader modification to an ORM statement at compilation time.
+
+ .. versionadded:: 1.4
+
+ """
+
+ def process_compile_state_replaced_entities(
+ self, compile_state, mapper_entities
+ ):
+ """Apply a modification to a given :class:`.CompileState`,
+ given entities that were replaced by with_only_columns() or
+ with_entities().
+
+ .. versionadded:: 1.4.19
+
+ """
+ self.process_compile_state(compile_state)
+
+ def process_compile_state(self, compile_state):
+ """Apply a modification to a given :class:`.CompileState`."""
+
+
+class CriteriaOption(CompileStateOption):
+ """Describe a WHERE criteria modification to an ORM statement at
+ compilation time.
+
+ .. versionadded:: 1.4
+
+ """
+
+ _is_criteria_option = True
+
+ def process_compile_state(self, compile_state):
+ """Apply a modification to a given :class:`.CompileState`."""
+
+ def get_global_criteria(self, attributes):
+ """update additional entity criteria options in the given
+ attributes dictionary.
+
+ """
+
+
+class UserDefinedOption(ORMOption):
+ """Base class for a user-defined option that can be consumed from the
+ :meth:`.SessionEvents.do_orm_execute` event hook.
+
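+    E.g., a minimal sketch; the option subclass, the payload handling
+    and the statement are illustrative only::
+
+        from sqlalchemy import event, select
+        from sqlalchemy.orm import Session, UserDefinedOption
+
+        class CacheKeyOption(UserDefinedOption):
+            pass
+
+        @event.listens_for(Session, "do_orm_execute")
+        def _consume_options(orm_execute_state):
+            for opt in orm_execute_state.user_defined_options:
+                if isinstance(opt, CacheKeyOption):
+                    ...  # act upon opt.payload
+
+        session.execute(select(User).options(CacheKeyOption("some-key")))
+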
+ """
+
+ _is_legacy_option = False
+
+ propagate_to_loaders = False
+ """if True, indicate this option should be carried along
+ to "secondary" Query objects produced during lazy loads
+ or refresh operations.
+
+ """
+
+ def __init__(self, payload=None):
+ self.payload = payload
+
+
+@util.deprecated_cls(
+ "1.4",
+ "The :class:`.MapperOption class is deprecated and will be removed "
+ "in a future release. For "
+ "modifications to queries on a per-execution basis, use the "
+ ":class:`.UserDefinedOption` class to establish state within a "
+ ":class:`.Query` or other Core statement, then use the "
+ ":meth:`.SessionEvents.before_orm_execute` hook to consume them.",
+ constructor=None,
+)
+class MapperOption(ORMOption):
+ """Describe a modification to a Query"""
+
+ _is_legacy_option = True
+
+ propagate_to_loaders = False
+ """if True, indicate this option should be carried along
+ to "secondary" Query objects produced during lazy loads
+ or refresh operations.
+
+ """
+
+ def process_query(self, query):
+ """Apply a modification to the given :class:`_query.Query`."""
+
+ def process_query_conditionally(self, query):
+ """same as process_query(), except that this option may not
+ apply to the given query.
+
+ This is typically applied during a lazy load or scalar refresh
+ operation to propagate options stated in the original Query to the
+ new Query being used for the load. It occurs for those options that
+ specify propagate_to_loaders=True.
+
+ """
+
+ self.process_query(query)
+
+
+class LoaderStrategy(object):
+ """Describe the loading behavior of a StrategizedProperty object.
+
+ The ``LoaderStrategy`` interacts with the querying process in three
+ ways:
+
+    * It controls the configuration of the ``InstrumentedAttribute``
+      placed on a class to handle the behavior of the attribute.  This
+      may involve setting up class-level callable functions to fire
+      off a select operation when the attribute is first accessed
+      (i.e. a lazy load).
+
+    * It processes the ``QueryContext`` at statement construction time,
+      where it can modify the SQL statement that is being produced.
+      For example, simple column attributes will add their represented
+      column to the list of selected columns, while a joined eager
+      loader may establish join clauses to add to the statement.
+
+ * It produces "row processor" functions at result fetching time.
+ These "row processor" functions populate a particular attribute
+ on a particular mapped instance.
+
+ """
+
+ __slots__ = (
+ "parent_property",
+ "is_class_level",
+ "parent",
+ "key",
+ "strategy_key",
+ "strategy_opts",
+ )
+
+ def __init__(self, parent, strategy_key):
+ self.parent_property = parent
+ self.is_class_level = False
+ self.parent = self.parent_property.parent
+ self.key = self.parent_property.key
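+        # strategy_key is a hashable tuple of (argument, value) pairs,
+        # e.g. (("lazy", "select"),); retain a dict form as well for
+        # keyword-style lookup of individual options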
+ self.strategy_key = strategy_key
+ self.strategy_opts = dict(strategy_key)
+
+ def init_class_attribute(self, mapper):
+ pass
+
+ def setup_query(
+ self, compile_state, query_entity, path, loadopt, adapter, **kwargs
+ ):
+ """Establish column and other state for a given QueryContext.
+
+ This method fulfills the contract specified by MapperProperty.setup().
+
+ StrategizedProperty delegates its setup() method
+ directly to this method.
+
+ """
+
+ def create_row_processor(
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ ):
+ """Establish row processing functions for a given QueryContext.
+
+ This method fulfills the contract specified by
+ MapperProperty.create_row_processor().
+
+ StrategizedProperty delegates its create_row_processor() method
+ directly to this method.
+
+ """
+
+ def __str__(self):
+ return str(self.parent_property)
diff --git a/lib/sqlalchemy/orm/loading.py b/lib/sqlalchemy/orm/loading.py
new file mode 100644
index 0000000..b5691c0
--- /dev/null
+++ b/lib/sqlalchemy/orm/loading.py
@@ -0,0 +1,1465 @@
+# orm/loading.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""private module containing functions used to convert database
+rows into object instances and associated state.
+
+the functions here are called primarily by Query, Mapper,
+as well as some of the attribute loading strategies.
+
+"""
+from __future__ import absolute_import
+
+from . import attributes
+from . import exc as orm_exc
+from . import path_registry
+from . import strategy_options
+from .base import _DEFER_FOR_STATE
+from .base import _RAISE_FOR_STATE
+from .base import _SET_DEFERRED_EXPIRED
+from .util import _none_set
+from .util import state_str
+from .. import exc as sa_exc
+from .. import future
+from .. import util
+from ..engine import result_tuple
+from ..engine.result import ChunkedIteratorResult
+from ..engine.result import FrozenResult
+from ..engine.result import SimpleResultMetaData
+from ..sql import util as sql_util
+from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
+from ..sql.selectable import SelectState
+
+_new_runid = util.counter()
+
+
+def instances(cursor, context):
+ """Return a :class:`.Result` given an ORM query context.
+
+ :param cursor: a :class:`.CursorResult`, generated by a statement
+ which came from :class:`.ORMCompileState`
+
+ :param context: a :class:`.QueryContext` object
+
+ :return: a :class:`.Result` object representing ORM results
+
+ .. versionchanged:: 1.4 The instances() function now uses
+ :class:`.Result` objects and has an all new interface.
+
+ """
+
+ context.runid = _new_runid()
+ context.post_load_paths = {}
+
+ compile_state = context.compile_state
+ filtered = compile_state._has_mapper_entities
+ single_entity = (
+ not context.load_options._only_return_tuples
+ and len(compile_state._entities) == 1
+ and compile_state._entities[0].supports_single_entity
+ )
+
+ try:
+ (process, labels, extra) = list(
+ zip(
+ *[
+ query_entity.row_processor(context, cursor)
+ for query_entity in context.compile_state._entities
+ ]
+ )
+ )
+
+ if context.yield_per and (
+ context.loaders_require_buffering
+ or context.loaders_require_uniquing
+ ):
+ raise sa_exc.InvalidRequestError(
+ "Can't use yield_per with eager loaders that require uniquing "
+ "or row buffering, e.g. joinedload() against collections "
+ "or subqueryload(). Consider the selectinload() strategy "
+ "for better flexibility in loading objects."
+ )
+
+ except Exception:
+ with util.safe_reraise():
+ cursor.close()
+
+ def _no_unique(entry):
+ raise sa_exc.InvalidRequestError(
+ "Can't use the ORM yield_per feature in conjunction with unique()"
+ )
+
+ def _not_hashable(datatype):
+ def go(obj):
+ raise sa_exc.InvalidRequestError(
+ "Can't apply uniqueness to row tuple containing value of "
+ "type %r; this datatype produces non-hashable values"
+ % datatype
+ )
+
+ return go
+
+ if context.load_options._legacy_uniquing:
+ unique_filters = [
+ _no_unique
+ if context.yield_per
+ else id
+ if (
+ ent.use_id_for_hash
+ or ent._non_hashable_value
+ or ent._null_column_type
+ )
+ else None
+ for ent in context.compile_state._entities
+ ]
+ else:
+ unique_filters = [
+ _no_unique
+ if context.yield_per
+ else _not_hashable(ent.column.type)
+ if (not ent.use_id_for_hash and ent._non_hashable_value)
+ else id
+ if ent.use_id_for_hash
+ else None
+ for ent in context.compile_state._entities
+ ]
+
+ row_metadata = SimpleResultMetaData(
+ labels, extra, _unique_filters=unique_filters
+ )
+
+ def chunks(size):
+ while True:
+ yield_per = size
+
+ context.partials = {}
+
+ if yield_per:
+ fetch = cursor.fetchmany(yield_per)
+
+ if not fetch:
+ break
+ else:
+ fetch = cursor._raw_all_rows()
+
+ if single_entity:
+ proc = process[0]
+ rows = [proc(row) for row in fetch]
+ else:
+ rows = [
+ tuple([proc(row) for proc in process]) for row in fetch
+ ]
+
+ for path, post_load in context.post_load_paths.items():
+ post_load.invoke(context, path)
+
+ yield rows
+
+ if not yield_per:
+ break
+
+ if context.execution_options.get("prebuffer_rows", False):
+ # this is a bit of a hack at the moment.
+ # I would rather have some option in the result to pre-buffer
+ # internally.
+ _prebuffered = list(chunks(None))
+
+ def chunks(size):
+ return iter(_prebuffered)
+
+ result = ChunkedIteratorResult(
+ row_metadata,
+ chunks,
+ source_supports_scalars=single_entity,
+ raw=cursor,
+ dynamic_yield_per=cursor.context._is_server_side,
+ )
+
+ # filtered and single_entity are used to indicate to legacy Query that the
+ # query has ORM entities, so legacy deduping and scalars should be called
+ # on the result.
+ result._attributes = result._attributes.union(
+ dict(filtered=filtered, is_single_entity=single_entity)
+ )
+
+ # multi_row_eager_loaders OTOH is specific to joinedload.
+ if context.compile_state.multi_row_eager_loaders:
+
+ def require_unique(obj):
+ raise sa_exc.InvalidRequestError(
+ "The unique() method must be invoked on this Result, "
+ "as it contains results that include joined eager loads "
+ "against collections"
+ )
+
+ result._unique_filter_state = (None, require_unique)
+
+ if context.yield_per:
+ result.yield_per(context.yield_per)
+
+ return result
+
+
+@util.preload_module("sqlalchemy.orm.context")
+def merge_frozen_result(session, statement, frozen_result, load=True):
+ """Merge a :class:`_engine.FrozenResult` back into a :class:`_orm.Session`,
+    returning a new :class:`_engine.FrozenResult` object with :term:`persistent`
+ objects.
+
+ See the section :ref:`do_orm_execute_re_executing` for an example.
+
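+    A minimal sketch::
+
+        frozen = session.execute(statement).freeze()
+
+        # ... later, e.g. after retrieving ``frozen`` from a cache,
+        # merge the rows into a Session and invoke the merged
+        # FrozenResult to produce a usable Result:
+        result = merge_frozen_result(session, statement, frozen)()
+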
+ .. seealso::
+
+ :ref:`do_orm_execute_re_executing`
+
+ :meth:`_engine.Result.freeze`
+
+ :class:`_engine.FrozenResult`
+
+ """
+ querycontext = util.preloaded.orm_context
+
+ if load:
+ # flush current contents if we expect to load data
+ session._autoflush()
+
+ ctx = querycontext.ORMSelectCompileState._create_entities_collection(
+ statement, legacy=False
+ )
+
+ autoflush = session.autoflush
+ try:
+ session.autoflush = False
+ mapped_entities = [
+ i
+ for i, e in enumerate(ctx._entities)
+ if isinstance(e, querycontext._MapperEntity)
+ ]
+ keys = [ent._label_name for ent in ctx._entities]
+
+ keyed_tuple = result_tuple(
+ keys, [ent._extra_entities for ent in ctx._entities]
+ )
+
+ result = []
+ for newrow in frozen_result.rewrite_rows():
+ for i in mapped_entities:
+ if newrow[i] is not None:
+ newrow[i] = session._merge(
+ attributes.instance_state(newrow[i]),
+ attributes.instance_dict(newrow[i]),
+ load=load,
+ _recursive={},
+ _resolve_conflict_map={},
+ )
+
+ result.append(keyed_tuple(newrow))
+
+ return frozen_result.with_new_rows(result)
+ finally:
+ session.autoflush = autoflush
+
+
+@util.deprecated_20(
+ ":func:`_orm.merge_result`",
+ alternative="The function as well as the method on :class:`_orm.Query` "
+ "is superseded by the :func:`_orm.merge_frozen_result` function.",
+ becomes_legacy=True,
+)
+@util.preload_module("sqlalchemy.orm.context")
+def merge_result(query, iterator, load=True):
+ """Merge a result into the given :class:`.Query` object's Session.
+
+ See :meth:`_orm.Query.merge_result` for top-level documentation on this
+ function.
+
+ """
+
+ querycontext = util.preloaded.orm_context
+
+ session = query.session
+ if load:
+ # flush current contents if we expect to load data
+ session._autoflush()
+
+ # TODO: need test coverage and documentation for the FrozenResult
+ # use case.
+ if isinstance(iterator, FrozenResult):
+ frozen_result = iterator
+ iterator = iter(frozen_result.data)
+ else:
+ frozen_result = None
+
+ ctx = querycontext.ORMSelectCompileState._create_entities_collection(
+ query, legacy=True
+ )
+
+ autoflush = session.autoflush
+ try:
+ session.autoflush = False
+ single_entity = not frozen_result and len(ctx._entities) == 1
+
+ if single_entity:
+ if isinstance(ctx._entities[0], querycontext._MapperEntity):
+ result = [
+ session._merge(
+ attributes.instance_state(instance),
+ attributes.instance_dict(instance),
+ load=load,
+ _recursive={},
+ _resolve_conflict_map={},
+ )
+ for instance in iterator
+ ]
+ else:
+ result = list(iterator)
+ else:
+ mapped_entities = [
+ i
+ for i, e in enumerate(ctx._entities)
+ if isinstance(e, querycontext._MapperEntity)
+ ]
+ result = []
+ keys = [ent._label_name for ent in ctx._entities]
+
+ keyed_tuple = result_tuple(
+ keys, [ent._extra_entities for ent in ctx._entities]
+ )
+
+ for row in iterator:
+ newrow = list(row)
+ for i in mapped_entities:
+ if newrow[i] is not None:
+ newrow[i] = session._merge(
+ attributes.instance_state(newrow[i]),
+ attributes.instance_dict(newrow[i]),
+ load=load,
+ _recursive={},
+ _resolve_conflict_map={},
+ )
+ result.append(keyed_tuple(newrow))
+
+ if frozen_result:
+ return frozen_result.with_data(result)
+ else:
+ return iter(result)
+ finally:
+ session.autoflush = autoflush
+
+
+def get_from_identity(session, mapper, key, passive):
+ """Look up the given key in the given session's identity map,
+ check the object for expired state if found.
+
+ """
+ instance = session.identity_map.get(key)
+ if instance is not None:
+
+ state = attributes.instance_state(instance)
+
+ if mapper.inherits and not state.mapper.isa(mapper):
+ return attributes.PASSIVE_CLASS_MISMATCH
+
+ # expired - ensure it still exists
+ if state.expired:
+ if not passive & attributes.SQL_OK:
+ # TODO: no coverage here
+ return attributes.PASSIVE_NO_RESULT
+ elif not passive & attributes.RELATED_OBJECT_OK:
+ # this mode is used within a flush and the instance's
+ # expired state will be checked soon enough, if necessary.
+ # also used by immediateloader for a mutually-dependent
+ # o2m->m2m load, :ticket:`6301`
+ return instance
+ try:
+ state._load_expired(state, passive)
+ except orm_exc.ObjectDeletedError:
+ session._remove_newly_deleted([state])
+ return None
+ return instance
+ else:
+ return None
+
+
+def load_on_ident(
+ session,
+ statement,
+ key,
+ load_options=None,
+ refresh_state=None,
+ with_for_update=None,
+ only_load_props=None,
+ no_autoflush=False,
+ bind_arguments=util.EMPTY_DICT,
+ execution_options=util.EMPTY_DICT,
+):
+ """Load the given identity key from the database."""
+ if key is not None:
+ ident = key[1]
+ identity_token = key[2]
+ else:
+ ident = identity_token = None
+
+ return load_on_pk_identity(
+ session,
+ statement,
+ ident,
+ load_options=load_options,
+ refresh_state=refresh_state,
+ with_for_update=with_for_update,
+ only_load_props=only_load_props,
+ identity_token=identity_token,
+ no_autoflush=no_autoflush,
+ bind_arguments=bind_arguments,
+ execution_options=execution_options,
+ )
+
+
+def load_on_pk_identity(
+ session,
+ statement,
+ primary_key_identity,
+ load_options=None,
+ refresh_state=None,
+ with_for_update=None,
+ only_load_props=None,
+ identity_token=None,
+ no_autoflush=False,
+ bind_arguments=util.EMPTY_DICT,
+ execution_options=util.EMPTY_DICT,
+):
+
+ """Load the given primary key identity from the database."""
+
+ query = statement
+ q = query._clone()
+
+ assert not q._is_lambda_element
+
+ # TODO: fix these imports ....
+ from .context import QueryContext, ORMCompileState
+
+ if load_options is None:
+ load_options = QueryContext.default_load_options
+
+ if (
+ statement._compile_options
+ is SelectState.default_select_compile_options
+ ):
+ compile_options = ORMCompileState.default_compile_options
+ else:
+ compile_options = statement._compile_options
+
+ if primary_key_identity is not None:
+ mapper = query._propagate_attrs["plugin_subject"]
+
+ (_get_clause, _get_params) = mapper._get_clause
+
+ # None present in ident - turn those comparisons
+ # into "IS NULL"
+ if None in primary_key_identity:
+ nones = set(
+ [
+ _get_params[col].key
+ for col, value in zip(
+ mapper.primary_key, primary_key_identity
+ )
+ if value is None
+ ]
+ )
+
+ _get_clause = sql_util.adapt_criterion_to_null(_get_clause, nones)
+
+ if len(nones) == len(primary_key_identity):
+ util.warn(
+ "fully NULL primary key identity cannot load any "
+ "object. This condition may raise an error in a future "
+ "release."
+ )
+
+ q._where_criteria = (
+ sql_util._deep_annotate(_get_clause, {"_orm_adapt": True}),
+ )
+
+ params = dict(
+ [
+ (_get_params[primary_key].key, id_val)
+ for id_val, primary_key in zip(
+ primary_key_identity, mapper.primary_key
+ )
+ ]
+ )
+ else:
+ params = None
+
+ if with_for_update is not None:
+ version_check = True
+ q._for_update_arg = with_for_update
+ elif query._for_update_arg is not None:
+ version_check = True
+ q._for_update_arg = query._for_update_arg
+ else:
+ version_check = False
+
+ if refresh_state and refresh_state.load_options:
+ compile_options += {"_current_path": refresh_state.load_path.parent}
+ q = q.options(*refresh_state.load_options)
+
+ new_compile_options, load_options = _set_get_options(
+ compile_options,
+ load_options,
+ version_check=version_check,
+ only_load_props=only_load_props,
+ refresh_state=refresh_state,
+ identity_token=identity_token,
+ )
+ q._compile_options = new_compile_options
+ q._order_by = None
+
+ if no_autoflush:
+ load_options += {"_autoflush": False}
+
+ execution_options = util.EMPTY_DICT.merge_with(
+ execution_options, {"_sa_orm_load_options": load_options}
+ )
+ result = (
+ session.execute(
+ q,
+ params=params,
+ execution_options=execution_options,
+ bind_arguments=bind_arguments,
+ )
+ .unique()
+ .scalars()
+ )
+
+ try:
+ return result.one()
+ except orm_exc.NoResultFound:
+ return None
+
+
+def _set_get_options(
+ compile_opt,
+ load_opt,
+ populate_existing=None,
+ version_check=None,
+ only_load_props=None,
+ refresh_state=None,
+ identity_token=None,
+):
+
+ compile_options = {}
+ load_options = {}
+ if version_check:
+ load_options["_version_check"] = version_check
+ if populate_existing:
+ load_options["_populate_existing"] = populate_existing
+ if refresh_state:
+ load_options["_refresh_state"] = refresh_state
+ compile_options["_for_refresh_state"] = True
+ if only_load_props:
+ compile_options["_only_load_props"] = frozenset(only_load_props)
+ if identity_token:
+ load_options["_refresh_identity_token"] = identity_token
+
+ if load_options:
+ load_opt += load_options
+ if compile_options:
+ compile_opt += compile_options
+
+ return compile_opt, load_opt
+
+
+def _setup_entity_query(
+ compile_state,
+ mapper,
+ query_entity,
+ path,
+ adapter,
+ column_collection,
+ with_polymorphic=None,
+ only_load_props=None,
+ polymorphic_discriminator=None,
+ **kw
+):
+
+ if with_polymorphic:
+ poly_properties = mapper._iterate_polymorphic_properties(
+ with_polymorphic
+ )
+ else:
+ poly_properties = mapper._polymorphic_properties
+
+ quick_populators = {}
+
+ path.set(compile_state.attributes, "memoized_setups", quick_populators)
+
+ # for the lead entities in the path, e.g. not eager loads, and
+ # assuming a user-passed aliased class, e.g. not a from_self() or any
+ # implicit aliasing, don't add columns to the SELECT that aren't
+ # in the thing that's aliased.
+ check_for_adapt = adapter and len(path) == 1 and path[-1].is_aliased_class
+
+ for value in poly_properties:
+ if only_load_props and value.key not in only_load_props:
+ continue
+
+ value.setup(
+ compile_state,
+ query_entity,
+ path,
+ adapter,
+ only_load_props=only_load_props,
+ column_collection=column_collection,
+ memoized_populators=quick_populators,
+ check_for_adapt=check_for_adapt,
+ **kw
+ )
+
+ if (
+ polymorphic_discriminator is not None
+ and polymorphic_discriminator is not mapper.polymorphic_on
+ ):
+
+ if adapter:
+ pd = adapter.columns[polymorphic_discriminator]
+ else:
+ pd = polymorphic_discriminator
+ column_collection.append(pd)
+
+
+def _warn_for_runid_changed(state):
+ util.warn(
+ "Loading context for %s has changed within a load/refresh "
+ "handler, suggesting a row refresh operation took place. If this "
+ "event handler is expected to be "
+ "emitting row refresh operations within an existing load or refresh "
+ "operation, set restore_load_context=True when establishing the "
+ "listener to ensure the context remains unchanged when the event "
+ "handler completes." % (state_str(state),)
+ )
+
+
+def _instance_processor(
+ query_entity,
+ mapper,
+ context,
+ result,
+ path,
+ adapter,
+ only_load_props=None,
+ refresh_state=None,
+ polymorphic_discriminator=None,
+ _polymorphic_from=None,
+):
+ """Produce a mapper level row processor callable
+ which processes rows into mapped instances."""
+
+ # note that this method, most of which exists in a closure
+ # called _instance(), resists being broken out, as
+ # attempts to do so tend to add significant function
+ # call overhead. _instance() is the most
+ # performance-critical section in the whole ORM.
+
+ identity_class = mapper._identity_class
+ compile_state = context.compile_state
+
+ # look for "row getter" functions that have been assigned along
+ # with the compile state that were cached from a previous load.
+ # these are operator.itemgetter() objects that each will extract a
+ # particular column from each row.
+
+ getter_key = ("getters", mapper)
+ getters = path.get(compile_state.attributes, getter_key, None)
+
+ if getters is None:
+ # no getters, so go through a list of attributes we are loading for,
+ # and the ones that are column based will have already put information
+ # for us in another collection "memoized_setups", which represents the
+ # output of the LoaderStrategy.setup_query() method. We can just as
+ # easily call LoaderStrategy.create_row_processor for each, but by
+ # getting it all at once from setup_query we save another method call
+ # per attribute.
+ props = mapper._prop_set
+ if only_load_props is not None:
+ props = props.intersection(
+ mapper._props[k] for k in only_load_props
+ )
+
+ quick_populators = path.get(
+ context.attributes, "memoized_setups", _none_set
+ )
+
+ todo = []
+ cached_populators = {
+ "new": [],
+ "quick": [],
+ "deferred": [],
+ "expire": [],
+ "delayed": [],
+ "existing": [],
+ "eager": [],
+ }
+
+ if refresh_state is None:
+ # we can also get the "primary key" tuple getter function
+ pk_cols = mapper.primary_key
+
+ if adapter:
+ pk_cols = [adapter.columns[c] for c in pk_cols]
+ primary_key_getter = result._tuple_getter(pk_cols)
+ else:
+ primary_key_getter = None
+
+ getters = {
+ "cached_populators": cached_populators,
+ "todo": todo,
+ "primary_key_getter": primary_key_getter,
+ }
+ for prop in props:
+ if prop in quick_populators:
+ # this is an inlined path just for column-based attributes.
+ col = quick_populators[prop]
+ if col is _DEFER_FOR_STATE:
+ cached_populators["new"].append(
+ (prop.key, prop._deferred_column_loader)
+ )
+ elif col is _SET_DEFERRED_EXPIRED:
+ # note that in this path, we are no longer
+ # searching in the result to see if the column might
+ # be present in some unexpected way.
+ cached_populators["expire"].append((prop.key, False))
+ elif col is _RAISE_FOR_STATE:
+ cached_populators["new"].append(
+ (prop.key, prop._raise_column_loader)
+ )
+ else:
+ getter = None
+ if adapter:
+ # this logic had been removed for all 1.4 releases
+ # up until 1.4.18; the adapter here is particularly
+ # the compound eager adapter which isn't accommodated
+ # in the quick_populators right now. The "fallback"
+ # logic below instead took over in many more cases
+ # until issue #6596 was identified.
+
+ # note there is still an issue where this codepath
+ # produces no "getter" for cases where a joined-inh
+ # mapping includes a labeled column property, meaning
+ # KeyError is caught internally and we fall back to
+ # _getter(col), which works anyway. The adapter
+ # here for joined inh without any aliasing might not
+ # be useful. Tests which see this include
+ # test.orm.inheritance.test_basic ->
+ # EagerTargetingTest.test_adapt_stringency
+ # OptimizedLoadTest.test_column_expression_joined
+ # PolymorphicOnNotLocalTest.test_polymorphic_on_column_prop # noqa: E501
+ #
+
+ adapted_col = adapter.columns[col]
+ if adapted_col is not None:
+ getter = result._getter(adapted_col, False)
+ if not getter:
+ getter = result._getter(col, False)
+ if getter:
+ cached_populators["quick"].append((prop.key, getter))
+ else:
+ # fall back to the ColumnProperty itself, which
+ # will iterate through all of its columns
+ # to see if one fits
+ prop.create_row_processor(
+ context,
+ query_entity,
+ path,
+ mapper,
+ result,
+ adapter,
+ cached_populators,
+ )
+ else:
+ # loader strategies like subqueryload, selectinload,
+ # joinedload, basically relationships, these need to interact
+ # with the context each time to work correctly.
+ todo.append(prop)
+
+ path.set(compile_state.attributes, getter_key, getters)
+
+ cached_populators = getters["cached_populators"]
+
+ populators = {key: list(value) for key, value in cached_populators.items()}
+ for prop in getters["todo"]:
+ prop.create_row_processor(
+ context, query_entity, path, mapper, result, adapter, populators
+ )
+
+ propagated_loader_options = context.propagated_loader_options
+ load_path = (
+ context.compile_state.current_path + path
+ if context.compile_state.current_path.path
+ else path
+ )
+
+ session_identity_map = context.session.identity_map
+
+ populate_existing = context.populate_existing or mapper.always_refresh
+ load_evt = bool(mapper.class_manager.dispatch.load)
+ refresh_evt = bool(mapper.class_manager.dispatch.refresh)
+ persistent_evt = bool(context.session.dispatch.loaded_as_persistent)
+ if persistent_evt:
+ loaded_as_persistent = context.session.dispatch.loaded_as_persistent
+ instance_state = attributes.instance_state
+ instance_dict = attributes.instance_dict
+ session_id = context.session.hash_key
+ runid = context.runid
+ identity_token = context.identity_token
+
+ version_check = context.version_check
+ if version_check:
+ version_id_col = mapper.version_id_col
+ if version_id_col is not None:
+ if adapter:
+ version_id_col = adapter.columns[version_id_col]
+ version_id_getter = result._getter(version_id_col)
+ else:
+ version_id_getter = None
+
+ if not refresh_state and _polymorphic_from is not None:
+ key = ("loader", path.path)
+ if key in context.attributes and context.attributes[key].strategy == (
+ ("selectinload_polymorphic", True),
+ ):
+ selectin_load_via = mapper._should_selectin_load(
+ context.attributes[key].local_opts["entities"],
+ _polymorphic_from,
+ )
+ else:
+ selectin_load_via = mapper._should_selectin_load(
+ None, _polymorphic_from
+ )
+
+ if selectin_load_via and selectin_load_via is not _polymorphic_from:
+ # only_load_props goes w/ refresh_state only, and in a refresh
+ # we are a single row query for the exact entity; polymorphic
+ # loading does not apply
+ assert only_load_props is None
+
+ callable_ = _load_subclass_via_in(context, path, selectin_load_via)
+
+ PostLoad.callable_for_path(
+ context,
+ load_path,
+ selectin_load_via.mapper,
+ selectin_load_via,
+ callable_,
+ selectin_load_via,
+ )
+
+ post_load = PostLoad.for_context(context, load_path, only_load_props)
+
+ if refresh_state:
+ refresh_identity_key = refresh_state.key
+ if refresh_identity_key is None:
+ # super-rare condition; a refresh is being called
+ # on a non-instance-key instance; this is meant to only
+ # occur within a flush()
+ refresh_identity_key = mapper._identity_key_from_state(
+ refresh_state
+ )
+ else:
+ refresh_identity_key = None
+
+ primary_key_getter = getters["primary_key_getter"]
+
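+    # select the test used to reject a row's primary key tuple: when
+    # partial PKs are allowed, only an all-None tuple disqualifies the
+    # row (frozenset([None]).issuperset); otherwise any None value in
+    # the tuple does (frozenset([None]).intersection)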
+ if mapper.allow_partial_pks:
+ is_not_primary_key = _none_set.issuperset
+ else:
+ is_not_primary_key = _none_set.intersection
+
+ def _instance(row):
+
+ # determine the state that we'll be populating
+ if refresh_identity_key:
+ # fixed state that we're refreshing
+ state = refresh_state
+ instance = state.obj()
+ dict_ = instance_dict(instance)
+ isnew = state.runid != runid
+ currentload = True
+ loaded_instance = False
+ else:
+ # look at the row, see if that identity is in the
+ # session, or we have to create a new one
+ identitykey = (
+ identity_class,
+ primary_key_getter(row),
+ identity_token,
+ )
+
+ instance = session_identity_map.get(identitykey)
+
+ if instance is not None:
+ # existing instance
+ state = instance_state(instance)
+ dict_ = instance_dict(instance)
+
+ isnew = state.runid != runid
+ currentload = not isnew
+ loaded_instance = False
+
+ if version_check and version_id_getter and not currentload:
+ _validate_version_id(
+ mapper, state, dict_, row, version_id_getter
+ )
+
+ else:
+ # create a new instance
+
+ # check for non-NULL values in the primary key columns,
+ # else no entity is returned for the row
+ if is_not_primary_key(identitykey[1]):
+ return None
+
+ isnew = True
+ currentload = True
+ loaded_instance = True
+
+ instance = mapper.class_manager.new_instance()
+
+ dict_ = instance_dict(instance)
+ state = instance_state(instance)
+ state.key = identitykey
+ state.identity_token = identity_token
+
+ # attach instance to session.
+ state.session_id = session_id
+ session_identity_map._add_unpresent(state, identitykey)
+
+ effective_populate_existing = populate_existing
+ if refresh_state is state:
+ effective_populate_existing = True
+
+ # populate. this looks at whether this state is new
+ # for this load or was existing, and whether or not this
+ # row is the first row with this identity.
+ if currentload or effective_populate_existing:
+ # full population routines. Objects here are either
+ # just created, or we are doing a populate_existing
+
+ # be conservative about setting load_path when populate_existing
+ # is in effect; want to maintain options from the original
+ # load. see test_expire->test_refresh_maintains_deferred_options
+ if isnew and (
+ propagated_loader_options or not effective_populate_existing
+ ):
+ state.load_options = propagated_loader_options
+ state.load_path = load_path
+
+ _populate_full(
+ context,
+ row,
+ state,
+ dict_,
+ isnew,
+ load_path,
+ loaded_instance,
+ effective_populate_existing,
+ populators,
+ )
+
+ if isnew:
+ # state.runid should be equal to context.runid / runid
+ # here, however for event checks we are being more conservative
+ # and checking against existing run id
+ # assert state.runid == runid
+
+ existing_runid = state.runid
+
+ if loaded_instance:
+ if load_evt:
+ state.manager.dispatch.load(state, context)
+ if state.runid != existing_runid:
+ _warn_for_runid_changed(state)
+ if persistent_evt:
+ loaded_as_persistent(context.session, state)
+ if state.runid != existing_runid:
+ _warn_for_runid_changed(state)
+ elif refresh_evt:
+ state.manager.dispatch.refresh(
+ state, context, only_load_props
+ )
+ if state.runid != runid:
+ _warn_for_runid_changed(state)
+
+ if effective_populate_existing or state.modified:
+ if refresh_state and only_load_props:
+ state._commit(dict_, only_load_props)
+ else:
+ state._commit_all(dict_, session_identity_map)
+
+ if post_load:
+ post_load.add_state(state, True)
+
+ else:
+ # partial population routines, for objects that were already
+ # in the Session, but a row matches them; apply eager loaders
+ # on existing objects, etc.
+ unloaded = state.unloaded
+ isnew = state not in context.partials
+
+ if not isnew or unloaded or populators["eager"]:
+ # state is having a partial set of its attributes
+ # refreshed. Populate those attributes,
+ # and add to the "context.partials" collection.
+
+ to_load = _populate_partial(
+ context,
+ row,
+ state,
+ dict_,
+ isnew,
+ load_path,
+ unloaded,
+ populators,
+ )
+
+ if isnew:
+ if refresh_evt:
+ existing_runid = state.runid
+ state.manager.dispatch.refresh(state, context, to_load)
+ if state.runid != existing_runid:
+ _warn_for_runid_changed(state)
+
+ state._commit(dict_, to_load)
+
+ if post_load and context.invoke_all_eagers:
+ post_load.add_state(state, False)
+
+ return instance
+
+ if mapper.polymorphic_map and not _polymorphic_from and not refresh_state:
+ # if we are doing polymorphic, dispatch to a different _instance()
+ # method specific to the subclass mapper
+ def ensure_no_pk(row):
+ identitykey = (
+ identity_class,
+ primary_key_getter(row),
+ identity_token,
+ )
+ if not is_not_primary_key(identitykey[1]):
+ return identitykey
+ else:
+ return None
+
+ _instance = _decorate_polymorphic_switch(
+ _instance,
+ context,
+ query_entity,
+ mapper,
+ result,
+ path,
+ polymorphic_discriminator,
+ adapter,
+ ensure_no_pk,
+ )
+
+ return _instance
+
+
+def _load_subclass_via_in(context, path, entity):
+ mapper = entity.mapper
+
+ zero_idx = len(mapper.base_mapper.primary_key) == 1
+
+ if entity.is_aliased_class:
+ q, enable_opt, disable_opt = mapper._subclass_load_via_in(entity)
+ else:
+ q, enable_opt, disable_opt = mapper._subclass_load_via_in_mapper
+
+ def do_load(context, path, states, load_only, effective_entity):
+ orig_query = context.query
+
+ options = (enable_opt,) + orig_query._with_options + (disable_opt,)
+ q2 = q.options(*options)
+
+ q2._compile_options = context.compile_state.default_compile_options
+ q2._compile_options += {"_current_path": path.parent}
+
+ if context.populate_existing:
+ q2 = q2.execution_options(populate_existing=True)
+
+ context.session.execute(
+ q2,
+ dict(
+ primary_keys=[
+ state.key[1][0] if zero_idx else state.key[1]
+ for state, load_attrs in states
+ ]
+ ),
+ ).unique().scalars().all()
+
+ return do_load
+
+
+def _populate_full(
+ context,
+ row,
+ state,
+ dict_,
+ isnew,
+ load_path,
+ loaded_instance,
+ populate_existing,
+ populators,
+):
+ if isnew:
+ # first time we are seeing a row with this identity.
+ state.runid = context.runid
+
+ for key, getter in populators["quick"]:
+ dict_[key] = getter(row)
+ if populate_existing:
+ for key, set_callable in populators["expire"]:
+ dict_.pop(key, None)
+ if set_callable:
+ state.expired_attributes.add(key)
+ else:
+ for key, set_callable in populators["expire"]:
+ if set_callable:
+ state.expired_attributes.add(key)
+
+ for key, populator in populators["new"]:
+ populator(state, dict_, row)
+ for key, populator in populators["delayed"]:
+ populator(state, dict_, row)
+ elif load_path != state.load_path:
+ # new load path, e.g. object is present in more than one
+ # column position in a series of rows
+ state.load_path = load_path
+
+ # if we have data, and the data isn't in the dict, OK, let's put
+ # it in.
+ for key, getter in populators["quick"]:
+ if key not in dict_:
+ dict_[key] = getter(row)
+
+ # otherwise treat like an "already seen" row
+ for key, populator in populators["existing"]:
+ populator(state, dict_, row)
+ # TODO: allow "existing" populator to know this is
+ # a new path for the state:
+ # populator(state, dict_, row, new_path=True)
+
+ else:
+ # have already seen rows with this identity in this same path.
+ for key, populator in populators["existing"]:
+ populator(state, dict_, row)
+
+ # TODO: same path
+ # populator(state, dict_, row, new_path=False)
+
+
+def _populate_partial(
+ context, row, state, dict_, isnew, load_path, unloaded, populators
+):
+
+ if not isnew:
+ to_load = context.partials[state]
+ for key, populator in populators["existing"]:
+ if key in to_load:
+ populator(state, dict_, row)
+ else:
+ to_load = unloaded
+ context.partials[state] = to_load
+
+ for key, getter in populators["quick"]:
+ if key in to_load:
+ dict_[key] = getter(row)
+ for key, set_callable in populators["expire"]:
+ if key in to_load:
+ dict_.pop(key, None)
+ if set_callable:
+ state.expired_attributes.add(key)
+ for key, populator in populators["new"]:
+ if key in to_load:
+ populator(state, dict_, row)
+ for key, populator in populators["delayed"]:
+ if key in to_load:
+ populator(state, dict_, row)
+ for key, populator in populators["eager"]:
+ if key not in unloaded:
+ populator(state, dict_, row)
+
+ return to_load
+
+
+def _validate_version_id(mapper, state, dict_, row, getter):
+
+ if mapper._get_state_attr_by_column(
+ state, dict_, mapper.version_id_col
+ ) != getter(row):
+ raise orm_exc.StaleDataError(
+ "Instance '%s' has version id '%s' which "
+ "does not match database-loaded version id '%s'."
+ % (
+ state_str(state),
+ mapper._get_state_attr_by_column(
+ state, dict_, mapper.version_id_col
+ ),
+ getter(row),
+ )
+ )
+
+
+def _decorate_polymorphic_switch(
+ instance_fn,
+ context,
+ query_entity,
+ mapper,
+ result,
+ path,
+ polymorphic_discriminator,
+ adapter,
+ ensure_no_pk,
+):
+ if polymorphic_discriminator is not None:
+ polymorphic_on = polymorphic_discriminator
+ else:
+ polymorphic_on = mapper.polymorphic_on
+ if polymorphic_on is None:
+ return instance_fn
+
+ if adapter:
+ polymorphic_on = adapter.columns[polymorphic_on]
+
+ def configure_subclass_mapper(discriminator):
+ try:
+ sub_mapper = mapper.polymorphic_map[discriminator]
+ except KeyError:
+ raise AssertionError(
+ "No such polymorphic_identity %r is defined" % discriminator
+ )
+ else:
+ if sub_mapper is mapper:
+ return None
+ elif not sub_mapper.isa(mapper):
+ return False
+
+ return _instance_processor(
+ query_entity,
+ sub_mapper,
+ context,
+ result,
+ path,
+ adapter,
+ _polymorphic_from=mapper,
+ )
+
+ polymorphic_instances = util.PopulateDict(configure_subclass_mapper)
+
+ getter = result._getter(polymorphic_on)
+
+ def polymorphic_instance(row):
+ discriminator = getter(row)
+ if discriminator is not None:
+ _instance = polymorphic_instances[discriminator]
+ if _instance:
+ return _instance(row)
+ elif _instance is False:
+ identitykey = ensure_no_pk(row)
+
+ if identitykey:
+ raise sa_exc.InvalidRequestError(
+ "Row with identity key %s can't be loaded into an "
+ "object; the polymorphic discriminator column '%s' "
+ "refers to %s, which is not a sub-mapper of "
+ "the requested %s"
+ % (
+ identitykey,
+ polymorphic_on,
+ mapper.polymorphic_map[discriminator],
+ mapper,
+ )
+ )
+ else:
+ return None
+ else:
+ return instance_fn(row)
+ else:
+ identitykey = ensure_no_pk(row)
+
+ if identitykey:
+ raise sa_exc.InvalidRequestError(
+ "Row with identity key %s can't be loaded into an "
+ "object; the polymorphic discriminator column '%s' is "
+ "NULL" % (identitykey, polymorphic_on)
+ )
+ else:
+ return None
+
+ return polymorphic_instance
+
+
+class PostLoad(object):
+ """Track loaders and states for "post load" operations."""
+
+ __slots__ = "loaders", "states", "load_keys"
+
+ def __init__(self):
+ self.loaders = {}
+ self.states = util.OrderedDict()
+ self.load_keys = None
+
+ def add_state(self, state, overwrite):
+ # the states for a polymorphic load here are all shared
+ # within a single PostLoad object among multiple subtypes.
+ # Filtering of callables on a per-subclass basis needs to be done at
+ # the invocation level
+ self.states[state] = overwrite
+
+ def invoke(self, context, path):
+ if not self.states:
+ return
+ path = path_registry.PathRegistry.coerce(path)
+ for token, limit_to_mapper, loader, arg, kw in self.loaders.values():
+ states = [
+ (state, overwrite)
+ for state, overwrite in self.states.items()
+ if state.manager.mapper.isa(limit_to_mapper)
+ ]
+ if states:
+ loader(context, path, states, self.load_keys, *arg, **kw)
+ self.states.clear()
+
+ @classmethod
+ def for_context(cls, context, path, only_load_props):
+ pl = context.post_load_paths.get(path.path)
+ if pl is not None and only_load_props:
+ pl.load_keys = only_load_props
+ return pl
+
+ @classmethod
+    def path_exists(cls, context, path, key):
+ return (
+ path.path in context.post_load_paths
+ and key in context.post_load_paths[path.path].loaders
+ )
+
+ @classmethod
+ def callable_for_path(
+ cls, context, path, limit_to_mapper, token, loader_callable, *arg, **kw
+ ):
+ if path.path in context.post_load_paths:
+ pl = context.post_load_paths[path.path]
+ else:
+ pl = context.post_load_paths[path.path] = PostLoad()
+ pl.loaders[token] = (token, limit_to_mapper, loader_callable, arg, kw)
+
+
+def load_scalar_attributes(mapper, state, attribute_names, passive):
+ """initiate a column-based attribute refresh operation."""
+
+ # assert mapper is _state_mapper(state)
+ session = state.session
+ if not session:
+ raise orm_exc.DetachedInstanceError(
+ "Instance %s is not bound to a Session; "
+ "attribute refresh operation cannot proceed" % (state_str(state))
+ )
+
+ has_key = bool(state.key)
+
+ result = False
+
+ no_autoflush = (
+ bool(passive & attributes.NO_AUTOFLUSH) or state.session.autocommit
+ )
+
+ # in the case of inheritance, particularly concrete and abstract
+ # concrete inheritance, the class manager might have some keys
+ # of attributes on the superclass that we didn't actually map.
+ # These could be mapped as "concrete, don't load" or could be completely
+ # excluded from the mapping and we know nothing about them. Filter them
+ # here to prevent them from coming through.
+ if attribute_names:
+ attribute_names = attribute_names.intersection(mapper.attrs.keys())
+
+ if mapper.inherits and not mapper.concrete:
+ # because we are using Core to produce a select() that we
+ # pass to the Query, we aren't calling setup() for mapped
+ # attributes; in 1.0 this means deferred attrs won't get loaded
+ # by default
+ statement = mapper._optimized_get_statement(state, attribute_names)
+ if statement is not None:
+ # this was previously aliased(mapper, statement), however,
+ # statement is a select() and Query's coercion now raises for this
+ # since you can't "select" from a "SELECT" statement. only
+ # from_statement() allows this.
+ # note: using from_statement() here means there is an adaption
+ # with adapt_on_names set up. the other option is to make the
+ # aliased() against a subquery which affects the SQL.
+
+ from .query import FromStatement
+
+ stmt = FromStatement(mapper, statement).options(
+ strategy_options.Load(mapper).undefer("*")
+ )
+
+ result = load_on_ident(
+ session,
+ stmt,
+ None,
+ only_load_props=attribute_names,
+ refresh_state=state,
+ no_autoflush=no_autoflush,
+ )
+
+ if result is False:
+ if has_key:
+ identity_key = state.key
+ else:
+ # this codepath is rare - only valid when inside a flush, and the
+ # object is becoming persistent but hasn't yet been assigned
+ # an identity_key.
+ # check here to ensure we have the attrs we need.
+ pk_attrs = [
+ mapper._columntoproperty[col].key for col in mapper.primary_key
+ ]
+ if state.expired_attributes.intersection(pk_attrs):
+ raise sa_exc.InvalidRequestError(
+ "Instance %s cannot be refreshed - it's not "
+ " persistent and does not "
+ "contain a full primary key." % state_str(state)
+ )
+ identity_key = mapper._identity_key_from_state(state)
+
+ if (
+ _none_set.issubset(identity_key) and not mapper.allow_partial_pks
+ ) or _none_set.issuperset(identity_key):
+ util.warn_limited(
+ "Instance %s to be refreshed doesn't "
+ "contain a full primary key - can't be refreshed "
+ "(and shouldn't be expired, either).",
+ state_str(state),
+ )
+ return
+
+ result = load_on_ident(
+ session,
+ future.select(mapper).set_label_style(
+ LABEL_STYLE_TABLENAME_PLUS_COL
+ ),
+ identity_key,
+ refresh_state=state,
+ only_load_props=attribute_names,
+ no_autoflush=no_autoflush,
+ )
+
+ # if instance is pending, a refresh operation
+ # may not complete (even if PK attributes are assigned)
+ if has_key and result is None:
+ raise orm_exc.ObjectDeletedError(state)
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py
new file mode 100644
index 0000000..ed221a9
--- /dev/null
+++ b/lib/sqlalchemy/orm/mapper.py
@@ -0,0 +1,3658 @@
+# orm/mapper.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""Logic to map Python classes to and from selectables.
+
+Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central
+configurational unit which associates a class with a database table.
+
+This is a semi-private module; the main configurational API of the ORM is
+available in :mod:`~sqlalchemy.orm`.
+
+"""
+from __future__ import absolute_import
+
+from collections import deque
+from itertools import chain
+import sys
+import weakref
+
+from . import attributes
+from . import exc as orm_exc
+from . import instrumentation
+from . import loading
+from . import properties
+from . import util as orm_util
+from .base import _class_to_mapper
+from .base import _state_mapper
+from .base import class_mapper
+from .base import state_str
+from .interfaces import _MappedAttribute
+from .interfaces import EXT_SKIP
+from .interfaces import InspectionAttr
+from .interfaces import MapperProperty
+from .interfaces import ORMEntityColumnsClauseRole
+from .interfaces import ORMFromClauseRole
+from .interfaces import StrategizedProperty
+from .path_registry import PathRegistry
+from .. import event
+from .. import exc as sa_exc
+from .. import inspection
+from .. import log
+from .. import schema
+from .. import sql
+from .. import util
+from ..sql import base as sql_base
+from ..sql import coercions
+from ..sql import expression
+from ..sql import operators
+from ..sql import roles
+from ..sql import util as sql_util
+from ..sql import visitors
+from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
+from ..util import HasMemoized
+
+_mapper_registries = weakref.WeakKeyDictionary()
+
+_legacy_registry = None
+
+
+def _all_registries():
+ with _CONFIGURE_MUTEX:
+ return set(_mapper_registries)
+
+
+def _unconfigured_mappers():
+ for reg in _all_registries():
+ for mapper in reg._mappers_to_configure():
+ yield mapper
+
+
+_already_compiling = False
+
+
+# a constant returned by _get_attr_by_column to indicate
+# this mapper is not handling an attribute for a particular
+# column
+NO_ATTRIBUTE = util.symbol("NO_ATTRIBUTE")
+
+# lock used to synchronize the "mapper configure" step
+_CONFIGURE_MUTEX = util.threading.RLock()
+
+
+@inspection._self_inspects
+@log.class_logger
+class Mapper(
+ ORMFromClauseRole,
+ ORMEntityColumnsClauseRole,
+ sql_base.MemoizedHasCacheKey,
+ InspectionAttr,
+):
+ """Defines an association between a Python class and a database table or
+ other relational structure, so that ORM operations against the class may
+ proceed.
+
+ The :class:`_orm.Mapper` object is instantiated using mapping methods
+ present on the :class:`_orm.registry` object. For information
+ about instantiating new :class:`_orm.Mapper` objects, see
+ :ref:`orm_mapping_classes_toplevel`.
+
+ """
+
+ _dispose_called = False
+ _ready_for_configure = False
+
+ @util.deprecated_params(
+ non_primary=(
+ "1.3",
+ "The :paramref:`.mapper.non_primary` parameter is deprecated, "
+ "and will be removed in a future release. The functionality "
+ "of non primary mappers is now better suited using the "
+ ":class:`.AliasedClass` construct, which can also be used "
+ "as the target of a :func:`_orm.relationship` in 1.3.",
+ ),
+ )
+ def __init__(
+ self,
+ class_,
+ local_table=None,
+ properties=None,
+ primary_key=None,
+ non_primary=False,
+ inherits=None,
+ inherit_condition=None,
+ inherit_foreign_keys=None,
+ always_refresh=False,
+ version_id_col=None,
+ version_id_generator=None,
+ polymorphic_on=None,
+ _polymorphic_map=None,
+ polymorphic_identity=None,
+ concrete=False,
+ with_polymorphic=None,
+ polymorphic_load=None,
+ allow_partial_pks=True,
+ batch=True,
+ column_prefix=None,
+ include_properties=None,
+ exclude_properties=None,
+ passive_updates=True,
+ passive_deletes=False,
+ confirm_deleted_rows=True,
+ eager_defaults=False,
+ legacy_is_orphan=False,
+ _compiled_cache_size=100,
+ ):
+ r"""Direct constructor for a new :class:`_orm.Mapper` object.
+
+ The :func:`_orm.mapper` function is normally invoked through the
+ use of the :class:`_orm.registry` object through either the
+ :ref:`Declarative <orm_declarative_mapping>` or
+ :ref:`Imperative <orm_imperative_mapping>` mapping styles.
+
+ .. versionchanged:: 1.4 The :func:`_orm.mapper` function should not
+ be called directly for classical mapping; for a classical mapping
+ configuration, use the :meth:`_orm.registry.map_imperatively`
+ method. The :func:`_orm.mapper` function may become private in a
+ future release.
+
+ Parameters documented below may be passed to either the
+ :meth:`_orm.registry.map_imperatively` method, or may be passed in the
+ ``__mapper_args__`` declarative class attribute described at
+ :ref:`orm_declarative_mapper_options`.
+
+ :param class\_: The class to be mapped. When using Declarative,
+ this argument is automatically passed as the declared class
+ itself.
+
+ :param local_table: The :class:`_schema.Table` or other selectable
+ to which the class is mapped. May be ``None`` if
+ this mapper inherits from another mapper using single-table
+ inheritance. When using Declarative, this argument is
+ automatically passed by the extension, based on what
+ is configured via the ``__table__`` argument or via the
+ :class:`_schema.Table`
+ produced as a result of the ``__tablename__``
+ and :class:`_schema.Column` arguments present.
+
+ :param always_refresh: If True, all query operations for this mapped
+ class will overwrite all data within object instances that already
+ exist within the session, erasing any in-memory changes with
+ whatever information was loaded from the database. Usage of this
+ flag is highly discouraged; as an alternative, see the method
+ :meth:`_query.Query.populate_existing`.
+
+ :param allow_partial_pks: Defaults to True. Indicates that a
+ composite primary key with some NULL values should be considered as
+ possibly existing within the database. This affects whether a
+ mapper will assign an incoming row to an existing identity, as well
+ as if :meth:`.Session.merge` will check the database first for a
+ particular primary key value. A "partial primary key" can occur if
+ one has mapped to an OUTER JOIN, for example.
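+
+ As an illustrative sketch (the ``user_order_outerjoin`` selectable
+ and ``UserToOrder`` class are hypothetical), the flag may be
+ disabled so that rows with a partially NULL composite key are
+ treated as non-present::
+
+    class UserToOrder(Base):
+        __table__ = user_order_outerjoin
+        __mapper_args__ = {"allow_partial_pks": False}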
+
+ :param batch: Defaults to ``True``, indicating that save operations
+ of multiple entities can be batched together for efficiency.
+ Setting to False indicates
+ that an instance will be fully saved before saving the next
+ instance. This is used in the extremely rare case that a
+ :class:`.MapperEvents` listener requires being called
+ in between individual row persistence operations.
+
+ :param column_prefix: A string which will be prepended
+ to the mapped attribute name when :class:`_schema.Column`
+ objects are automatically assigned as attributes to the
+ mapped class. Does not affect :class:`.Column` objects that
+ are mapped explicitly in the :paramref:`.mapper.properties`
+ dictionary.
+
+ This parameter is typically useful with imperative mappings
+ that keep the :class:`.Table` object separate. Below, assuming
+ the ``user_table`` :class:`.Table` object has columns named
+ ``user_id``, ``user_name``, and ``password``::
+
+    class User(Base):
+        __table__ = user_table
+        __mapper_args__ = {'column_prefix': '_'}
+
+ The above mapping will assign the ``user_id``, ``user_name``, and
+ ``password`` columns to attributes named ``_user_id``,
+ ``_user_name``, and ``_password`` on the mapped ``User`` class.
+
+ The :paramref:`.mapper.column_prefix` parameter is uncommon in
+ modern use. For dealing with reflected tables, a more flexible
+ approach to automating a naming scheme is to intercept the
+ :class:`.Column` objects as they are reflected; see the section
+ :ref:`mapper_automated_reflection_schemes` for notes on this usage
+ pattern.
+
+ :param concrete: If True, indicates this mapper should use concrete
+ table inheritance with its parent mapper.
+
+ See the section :ref:`concrete_inheritance` for an example.
+
+ :param confirm_deleted_rows: defaults to True; when a DELETE occurs
+ of one or more rows based on specific primary keys, a warning is
+ emitted when the number of rows matched does not equal the number
+ of rows expected. This parameter may be set to False to handle the
+ case where database ON DELETE CASCADE rules may be deleting some of
+ those rows automatically. The warning may be changed to an
+ exception in a future release.
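+
+ For example, a minimal sketch (the ``Widget`` class is
+ hypothetical) which disables the warning for a table whose rows
+ may be removed by ON DELETE CASCADE rules::
+
+    class Widget(Base):
+        __tablename__ = 'widget'
+
+        id = Column(Integer, primary_key=True)
+
+        __mapper_args__ = {"confirm_deleted_rows": False}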
+
+ .. versionadded:: 0.9.4 - added
+ :paramref:`.mapper.confirm_deleted_rows` as well as conditional
+ matched row checking on delete.
+
+ :param eager_defaults: if True, the ORM will immediately fetch the
+ value of server-generated default values after an INSERT or UPDATE,
+ rather than leaving them as expired to be fetched on next access.
+ This can be used for event schemes where the server-generated values
+ are needed immediately before the flush completes. By default,
+ this scheme will emit an individual ``SELECT`` statement per row
+ inserted or updated, which can add significant performance
+ overhead. However, if the
+ target database supports :term:`RETURNING`, the default values will
+ be returned inline with the INSERT or UPDATE statement, which can
+ greatly enhance performance for an application that needs frequent
+ access to just-generated server defaults.
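+
+ A brief sketch (the ``Widget`` class and its server-generated
+ ``timestamp`` column are hypothetical; ``func`` is the usual
+ ``sqlalchemy.func`` namespace)::
+
+    class Widget(Base):
+        __tablename__ = 'widget'
+
+        id = Column(Integer, primary_key=True)
+        timestamp = Column(DateTime, server_default=func.now())
+
+        __mapper_args__ = {"eager_defaults": True}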
+
+ .. seealso::
+
+ :ref:`orm_server_defaults`
+
+ .. versionchanged:: 0.9.0 The ``eager_defaults`` option can now
+ make use of :term:`RETURNING` for backends which support it.
+
+ :param exclude_properties: A list or set of string column names to
+ be excluded from mapping.
+
+ See :ref:`include_exclude_cols` for an example.
+
+ :param include_properties: An inclusive list or set of string column
+ names to map.
+
+ See :ref:`include_exclude_cols` for an example.
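+
+ As a brief sketch applying equally to either parameter (the
+ ``user_table`` :class:`_schema.Table` is assumed)::
+
+    class User(Base):
+        __table__ = user_table
+        __mapper_args__ = {
+            "include_properties": ["user_id", "user_name"]
+        }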
+
+ :param inherits: A mapped class or the corresponding
+ :class:`_orm.Mapper`
+ of one, indicating a superclass from which this :class:`_orm.Mapper`
+ should *inherit*. The mapped class here must be a subclass
+ of the other mapper's class. When using Declarative, this argument
+ is passed automatically as a result of the natural class
+ hierarchy of the declared classes.
+
+ .. seealso::
+
+ :ref:`inheritance_toplevel`
+
+ :param inherit_condition: For joined table inheritance, a SQL
+ expression which will
+ define how the two tables are joined; defaults to a natural join
+ between the two tables.
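+
+ For example, a sketch using hypothetical ``Person`` and
+ ``Engineer`` classes, where the join condition is spelled out
+ explicitly rather than derived from foreign keys::
+
+    class Engineer(Person):
+        __tablename__ = 'engineer'
+
+        id = Column(Integer, ForeignKey('person.id'), primary_key=True)
+
+        __mapper_args__ = {
+            "inherit_condition": id == Person.id
+        }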
+
+ :param inherit_foreign_keys: When ``inherit_condition`` is used and
+ the columns present are missing a :class:`_schema.ForeignKey`
+ configuration, this parameter can be used to specify which columns
+ are "foreign". In most cases can be left as ``None``.
+
+ :param legacy_is_orphan: Boolean, defaults to ``False``.
+ When ``True``, specifies that "legacy" orphan consideration
+ is to be applied to objects mapped by this mapper, which means
+ that a pending (that is, not persistent) object is auto-expunged
+ from an owning :class:`.Session` only when it is de-associated
+ from *all* parents that specify a ``delete-orphan`` cascade towards
+ this mapper. The new default behavior is that the object is
+ auto-expunged when it is de-associated with *any* of its parents
+ that specify ``delete-orphan`` cascade. This behavior is more
+ consistent with that of a persistent object, and allows behavior to
+ be consistent in more scenarios independently of whether or not the
+ orphan object has been flushed yet.
+
+ See the change note and example at :ref:`legacy_is_orphan_addition`
+ for more detail on this change.
+
+ :param non_primary: Specify that this :class:`_orm.Mapper`
+ is in addition
+ to the "primary" mapper, that is, the one used for persistence.
+ The :class:`_orm.Mapper` created here may be used for ad-hoc
+ mapping of the class to an alternate selectable, for loading
+ only.
+
+ .. seealso::
+
+ :ref:`relationship_aliased_class` - the new pattern that removes
+ the need for the :paramref:`_orm.Mapper.non_primary` flag.
+
+ :param passive_deletes: Indicates DELETE behavior of foreign key
+ columns when a joined-table inheritance entity is being deleted.
+ Defaults to ``False`` for a base mapper; for an inheriting mapper,
+ defaults to ``False`` unless the value is set to ``True``
+ on the superclass mapper.
+
+ When ``True``, it is assumed that ON DELETE CASCADE is configured
+ on the foreign key relationships that link this mapper's table
+ to its superclass table, so that when the unit of work attempts
+ to delete the entity, it need only emit a DELETE statement for the
+ superclass table, and not this table.
+
+ When ``False``, a DELETE statement is emitted for this mapper's
+ table individually. If the primary key attributes local to this
+ table are unloaded, then a SELECT must be emitted in order to
+ validate these attributes; note that the primary key columns
+ of a joined-table subclass are not part of the "primary key" of
+ the object as a whole.
+
+ Note that a value of ``True`` is **always** forced onto the
+ subclass mappers; that is, it's not possible for a superclass
+ to specify passive_deletes without this taking effect for
+ all subclass mappers.
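+
+ A sketch of the joined-table case (the ``Employee`` and
+ ``Engineer`` classes are assumed), where the database cascades the
+ DELETE from the base table to the subclass table::
+
+    class Engineer(Employee):
+        __tablename__ = 'engineer'
+
+        id = Column(
+            Integer,
+            ForeignKey('employee.id', ondelete="CASCADE"),
+            primary_key=True,
+        )
+
+        __mapper_args__ = {"passive_deletes": True}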
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`passive_deletes` - description of similar feature as
+ used with :func:`_orm.relationship`
+
+ :paramref:`.mapper.passive_updates` - supporting ON UPDATE
+ CASCADE for joined-table inheritance mappers
+
+ :param passive_updates: Indicates UPDATE behavior of foreign key
+ columns when a primary key column changes on a joined-table
+ inheritance mapping. Defaults to ``True``.
+
+ When True, it is assumed that ON UPDATE CASCADE is configured on
+ the foreign key in the database, and that the database will handle
+ propagation of an UPDATE from a source column to dependent columns
+ on joined-table rows.
+
+ When False, it is assumed that the database does not enforce
+ referential integrity and will not be issuing its own CASCADE
+ operation for an update. The unit of work process will
+ emit an UPDATE statement for the dependent columns during a
+ primary key change.
+
+ .. seealso::
+
+ :ref:`passive_updates` - description of a similar feature as
+ used with :func:`_orm.relationship`
+
+ :paramref:`.mapper.passive_deletes` - supporting ON DELETE
+ CASCADE for joined-table inheritance mappers
+
+ :param polymorphic_load: Specifies "polymorphic loading" behavior
+ for a subclass in an inheritance hierarchy (joined and single
+ table inheritance only). Valid values are:
+
+ * "'inline'" - specifies this class should be part of the
+ "with_polymorphic" mappers, e.g. its columns will be included
+ in a SELECT query against the base.
+
+ * "'selectin'" - specifies that when instances of this class
+ are loaded, an additional SELECT will be emitted to retrieve
+ the columns specific to this subclass. The SELECT uses
+ IN to fetch multiple subclasses at once; see the sketch below.
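+
+ A brief sketch of the "selectin" style, assuming a hypothetical
+ ``Engineer`` subclass of ``Employee``::
+
+    class Engineer(Employee):
+        __tablename__ = 'engineer'
+
+        id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
+
+        __mapper_args__ = {
+            "polymorphic_identity": "engineer",
+            "polymorphic_load": "selectin",
+        }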
+
+ .. versionadded:: 1.2
+
+ .. seealso::
+
+ :ref:`with_polymorphic_mapper_config`
+
+ :ref:`polymorphic_selectin`
+
+ :param polymorphic_on: Specifies the column, attribute, or
+ SQL expression used to determine the target class for an
+ incoming row, when inheriting classes are present.
+
+ This value is commonly a :class:`_schema.Column` object that's
+ present in the mapped :class:`_schema.Table`::
+
+    class Employee(Base):
+        __tablename__ = 'employee'
+
+        id = Column(Integer, primary_key=True)
+        discriminator = Column(String(50))
+
+        __mapper_args__ = {
+            "polymorphic_on": discriminator,
+            "polymorphic_identity": "employee"
+        }
+
+ It may also be specified
+ as a SQL expression, as in this example where we
+ use the :func:`.case` construct to provide a conditional
+ approach::
+
+    class Employee(Base):
+        __tablename__ = 'employee'
+
+        id = Column(Integer, primary_key=True)
+        discriminator = Column(String(50))
+
+        __mapper_args__ = {
+            "polymorphic_on": case([
+                (discriminator == "EN", "engineer"),
+                (discriminator == "MA", "manager"),
+            ], else_="employee"),
+            "polymorphic_identity": "employee"
+        }
+
+ It may also refer to any attribute
+ configured with :func:`.column_property`, or to the
+ string name of one::
+
+    class Employee(Base):
+        __tablename__ = 'employee'
+
+        id = Column(Integer, primary_key=True)
+        discriminator = Column(String(50))
+        employee_type = column_property(
+            case([
+                (discriminator == "EN", "engineer"),
+                (discriminator == "MA", "manager"),
+            ], else_="employee")
+        )
+
+        __mapper_args__ = {
+            "polymorphic_on": employee_type,
+            "polymorphic_identity": "employee"
+        }
+
+ When setting ``polymorphic_on`` to reference an
+ attribute or expression that's not present in the
+ locally mapped :class:`_schema.Table`, yet the value
+ of the discriminator should be persisted to the database,
+ the value of the
+ discriminator is not automatically set on new
+ instances; this must be handled by the user,
+ either through manual means or via event listeners.
+ A typical approach to establishing such a listener
+ looks like::
+
+    from sqlalchemy import event
+    from sqlalchemy.orm import object_mapper
+
+    @event.listens_for(Employee, "init", propagate=True)
+    def set_identity(instance, *arg, **kw):
+        mapper = object_mapper(instance)
+        instance.discriminator = mapper.polymorphic_identity
+
+ Where above, we assign the value of ``polymorphic_identity``
+ for the mapped class to the ``discriminator`` attribute,
+ thus persisting the value to the ``discriminator`` column
+ in the database.
+
+ .. warning::
+
+ Currently, **only one discriminator column may be set**, typically
+ on the base-most class in the hierarchy. "Cascading" polymorphic
+ columns are not yet supported.
+
+ .. seealso::
+
+ :ref:`inheritance_toplevel`
+
+ :param polymorphic_identity: Specifies the value which
+ identifies this particular class as returned by the
+ column expression referred to by the ``polymorphic_on``
+ setting. As rows are received, the value corresponding
+ to the ``polymorphic_on`` column expression is compared
+ to this value, indicating which subclass should
+ be used for the newly reconstructed object.
+
+ :param properties: A dictionary mapping the string names of object
+ attributes to :class:`.MapperProperty` instances, which define the
+ persistence behavior of that attribute. Note that
+ :class:`_schema.Column`
+ objects present in
+ the mapped :class:`_schema.Table` are automatically placed into
+ ``ColumnProperty`` instances upon mapping, unless overridden.
+ When using Declarative, this argument is passed automatically,
+ based on all those :class:`.MapperProperty` instances declared
+ in the declared class body.
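+
+ When mapping imperatively, a minimal sketch (the ``User`` and
+ ``Address`` classes and the ``user_table`` :class:`_schema.Table`
+ are assumed) looks like::
+
+    from sqlalchemy.orm import registry, relationship
+
+    mapper_registry = registry()
+    mapper_registry.map_imperatively(
+        User,
+        user_table,
+        properties={"addresses": relationship(Address)},
+    )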
+
+ .. seealso::
+
+ :ref:`orm_mapping_properties` - in the
+ :ref:`orm_mapping_classes_toplevel`
+
+ :param primary_key: A list of :class:`_schema.Column`
+ objects which define
+ the primary key to be used against this mapper's selectable unit.
+ This is normally simply the primary key of the ``local_table``, but
+ can be overridden here.
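+
+ For example, a sketch mapping a class against a hypothetical
+ ``user_keyword_view`` selectable which lacks a primary key
+ constraint of its own::
+
+    class UserKeyword(Base):
+        __table__ = user_keyword_view
+        __mapper_args__ = {
+            "primary_key": [
+                user_keyword_view.c.user_id,
+                user_keyword_view.c.keyword_id,
+            ]
+        }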
+
+ .. seealso::
+
+ :ref:`mapper_primary_key` - background and example use
+
+ :param version_id_col: A :class:`_schema.Column`
+ that will be used to keep a running version id of rows
+ in the table. This is used to detect concurrent updates or
+ the presence of stale data in a flush. The methodology is to
+ detect when an UPDATE statement does not match the last known
+ version id; when this occurs, a
+ :class:`~sqlalchemy.orm.exc.StaleDataError` exception is
+ raised.
+ By default, the column must be of :class:`.Integer` type,
+ unless ``version_id_generator`` specifies an alternative version
+ generator.
+
+ .. seealso::
+
+ :ref:`mapper_version_counter` - discussion of version counting
+ and rationale.
+
+ :param version_id_generator: Define how new version ids should
+ be generated. Defaults to ``None``, which indicates that
+ a simple integer counting scheme be employed. To provide a custom
+ versioning scheme, provide a callable function of the form::
+
+    def generate_version(version):
+        return next_version
+
+ Alternatively, server-side versioning functions such as triggers,
+ or programmatic versioning schemes outside of the version id
+ generator may be used, by specifying the value ``False``.
+ Please see :ref:`server_side_version_counter` for a discussion
+ of important points when using this option.
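+
+ For example, a sketch of a UUID-based scheme (the ``Widget`` class
+ and its ``version_uuid`` column are hypothetical)::
+
+    import uuid
+
+    class Widget(Base):
+        __tablename__ = 'widget'
+
+        id = Column(Integer, primary_key=True)
+        version_uuid = Column(String(32), nullable=False)
+
+        __mapper_args__ = {
+            "version_id_col": version_uuid,
+            "version_id_generator": lambda version: uuid.uuid4().hex,
+        }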
+
+ .. versionadded:: 0.9.0 ``version_id_generator`` supports
+ server-side version number generation.
+
+ .. seealso::
+
+ :ref:`custom_version_counter`
+
+ :ref:`server_side_version_counter`
+
+
+ :param with_polymorphic: A tuple in the form ``(<classes>,
+ <selectable>)`` indicating the default style of "polymorphic"
+ loading, that is, which tables are queried at once. <classes> is
+ a single mapper or class, or a list of mappers and/or classes, indicating the
+ inherited classes that should be loaded at once. The special value
+ ``'*'`` may be used to indicate all descending classes should be
+ loaded immediately. The second tuple argument <selectable>
+ indicates a selectable that will be used to query for multiple
+ classes.
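+
+ For example, a sketch (the ``discriminator`` column and the
+ subclasses of ``Employee`` are hypothetical) which requests that
+ all subclasses be loaded at once against the default selectable::
+
+    class Employee(Base):
+        __tablename__ = 'employee'
+
+        id = Column(Integer, primary_key=True)
+        discriminator = Column(String(50))
+
+        __mapper_args__ = {
+            "polymorphic_on": discriminator,
+            "polymorphic_identity": "employee",
+            "with_polymorphic": ("*", None),
+        }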
+
+ .. seealso::
+
+ :ref:`with_polymorphic` - discussion of polymorphic querying
+ techniques.
+
+ """
+ self.class_ = util.assert_arg_type(class_, type, "class_")
+ self._sort_key = "%s.%s" % (
+ self.class_.__module__,
+ self.class_.__name__,
+ )
+
+ self.class_manager = None
+
+ self._primary_key_argument = util.to_list(primary_key)
+ self.non_primary = non_primary
+
+ self.always_refresh = always_refresh
+
+ if isinstance(version_id_col, MapperProperty):
+ self.version_id_prop = version_id_col
+ self.version_id_col = None
+ else:
+ self.version_id_col = version_id_col
+ if version_id_generator is False:
+ self.version_id_generator = False
+ elif version_id_generator is None:
+ self.version_id_generator = lambda x: (x or 0) + 1
+ else:
+ self.version_id_generator = version_id_generator
+
+ self.concrete = concrete
+ self.single = False
+ self.inherits = inherits
+ if local_table is not None:
+ self.local_table = coercions.expect(
+ roles.StrictFromClauseRole, local_table
+ )
+ else:
+ self.local_table = None
+
+ self.inherit_condition = inherit_condition
+ self.inherit_foreign_keys = inherit_foreign_keys
+ self._init_properties = properties or {}
+ self._delete_orphans = []
+ self.batch = batch
+ self.eager_defaults = eager_defaults
+ self.column_prefix = column_prefix
+ self.polymorphic_on = (
+ coercions.expect(
+ roles.ColumnArgumentOrKeyRole,
+ polymorphic_on,
+ argname="polymorphic_on",
+ )
+ if polymorphic_on is not None
+ else None
+ )
+ self._dependency_processors = []
+ self.validators = util.EMPTY_DICT
+ self.passive_updates = passive_updates
+ self.passive_deletes = passive_deletes
+ self.legacy_is_orphan = legacy_is_orphan
+ self._clause_adapter = None
+ self._requires_row_aliasing = False
+ self._inherits_equated_pairs = None
+ self._memoized_values = {}
+ self._compiled_cache_size = _compiled_cache_size
+ self._reconstructor = None
+ self.allow_partial_pks = allow_partial_pks
+
+ if self.inherits and not self.concrete:
+ self.confirm_deleted_rows = False
+ else:
+ self.confirm_deleted_rows = confirm_deleted_rows
+
+ self._set_with_polymorphic(with_polymorphic)
+ self.polymorphic_load = polymorphic_load
+
+ # our 'polymorphic identity', a string name that when located in a
+ # result set row indicates this Mapper should be used to construct
+ # the object instance for that row.
+ self.polymorphic_identity = polymorphic_identity
+
+ # a dictionary of 'polymorphic identity' names, associating those
+ # names with Mappers that will be used to construct object instances
+ # upon a select operation.
+ if _polymorphic_map is None:
+ self.polymorphic_map = {}
+ else:
+ self.polymorphic_map = _polymorphic_map
+
+ if include_properties is not None:
+ self.include_properties = util.to_set(include_properties)
+ else:
+ self.include_properties = None
+ if exclude_properties:
+ self.exclude_properties = util.to_set(exclude_properties)
+ else:
+ self.exclude_properties = None
+
+ # prevent this mapper from being constructed
+ # while a configure_mappers() is occurring (and defer a
+ # configure_mappers() until construction succeeds)
+ with _CONFIGURE_MUTEX:
+ self.dispatch._events._new_mapper_instance(class_, self)
+ self._configure_inheritance()
+ self._configure_class_instrumentation()
+ self._configure_properties()
+ self._configure_polymorphic_setter()
+ self._configure_pks()
+ self.registry._flag_new_mapper(self)
+ self._log("constructed")
+ self._expire_memoizations()
+
+ # major attributes initialized at the classlevel so that
+ # they can be Sphinx-documented.
+
+ is_mapper = True
+ """Part of the inspection API."""
+
+ represents_outer_join = False
+
+ @property
+ def mapper(self):
+ """Part of the inspection API.
+
+ Returns self.
+
+ """
+ return self
+
+ def _gen_cache_key(self, anon_map, bindparams):
+ return (self,)
+
+ @property
+ def entity(self):
+ r"""Part of the inspection API.
+
+ Returns self.class\_.
+
+ """
+ return self.class_
+
+ local_table = None
+ """The :class:`_expression.Selectable` which this :class:`_orm.Mapper`
+ manages.
+
+ Typically is an instance of :class:`_schema.Table` or
+ :class:`_expression.Alias`.
+ May also be ``None``.
+
+ The "local" table is the
+ selectable that the :class:`_orm.Mapper` is directly responsible for
+ managing from an attribute access and flush perspective. For
+ non-inheriting mappers, the local table is the same as the
+ "mapped" table. For joined-table inheritance mappers, local_table
+ will be the particular sub-table of the overall "join" which
+ this :class:`_orm.Mapper` represents. If this mapper is a
+ single-table inheriting mapper, local_table will be ``None``.
+
+ .. seealso::
+
+ :attr:`_orm.Mapper.persist_selectable`.
+
+ """
+
+ persist_selectable = None
+ """The :class:`_expression.Selectable` to which this :class:`_orm.Mapper`
+ is mapped.
+
+ Typically an instance of :class:`_schema.Table`,
+ :class:`_expression.Join`, or :class:`_expression.Alias`.
+
+ The :attr:`_orm.Mapper.persist_selectable` is separate from
+ :attr:`_orm.Mapper.selectable` in that the former represents columns
+ that are mapped on this class or its superclasses, whereas the
+ latter may be a "polymorphic" selectable that contains additional columns
+ which are in fact mapped on subclasses only.
+
+ "persist selectable" is the "thing the mapper writes to" and
+ "selectable" is the "thing the mapper selects from".
+
+ :attr:`_orm.Mapper.persist_selectable` is also separate from
+ :attr:`_orm.Mapper.local_table`, which represents the set of columns that
+ are locally mapped on this class directly.
+
+
+ .. seealso::
+
+ :attr:`_orm.Mapper.selectable`.
+
+ :attr:`_orm.Mapper.local_table`.
+
+ """
+
+ inherits = None
+ """References the :class:`_orm.Mapper` which this :class:`_orm.Mapper`
+ inherits from, if any.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ configured = False
+ """Represent ``True`` if this :class:`_orm.Mapper` has been configured.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ .. seealso::
+
+ :func:`.configure_mappers`.
+
+ """
+
+ concrete = None
+ """Represent ``True`` if this :class:`_orm.Mapper` is a concrete
+ inheritance mapper.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ tables = None
+ """An iterable containing the collection of :class:`_schema.Table` objects
+ which this :class:`_orm.Mapper` is aware of.
+
+ If the mapper is mapped to a :class:`_expression.Join`, or an
+ :class:`_expression.Alias`
+ representing a :class:`_expression.Select`, the individual
+ :class:`_schema.Table`
+ objects that comprise the full construct will be represented here.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ primary_key = None
+ """An iterable containing the collection of :class:`_schema.Column`
+ objects
+ which comprise the 'primary key' of the mapped table, from the
+ perspective of this :class:`_orm.Mapper`.
+
+ This list is against the selectable in
+ :attr:`_orm.Mapper.persist_selectable`.
+ In the case of inheriting mappers, some columns may be managed by a
+ superclass mapper. For example, in the case of a
+ :class:`_expression.Join`, the
+ primary key is determined by all of the primary key columns across all
+ tables referenced by the :class:`_expression.Join`.
+
+ The list is also not necessarily the same as the primary key column
+ collection associated with the underlying tables; the :class:`_orm.Mapper`
+ features a ``primary_key`` argument that can override what the
+ :class:`_orm.Mapper` considers as primary key columns.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ class_ = None
+ """The Python class which this :class:`_orm.Mapper` maps.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ class_manager = None
+ """The :class:`.ClassManager` which maintains event listeners
+ and class-bound descriptors for this :class:`_orm.Mapper`.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ single = None
+ """Represent ``True`` if this :class:`_orm.Mapper` is a single table
+ inheritance mapper.
+
+ :attr:`_orm.Mapper.local_table` will be ``None`` if this flag is set.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ non_primary = None
+ """Represent ``True`` if this :class:`_orm.Mapper` is a "non-primary"
+ mapper, e.g. a mapper that is used only to select rows but not for
+ persistence management.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ polymorphic_on = None
+ """The :class:`_schema.Column` or SQL expression specified as the
+ ``polymorphic_on`` argument
+ for this :class:`_orm.Mapper`, within an inheritance scenario.
+
+ This attribute is normally a :class:`_schema.Column` instance but
+ may also be an expression, such as one derived from
+ :func:`.cast`.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ polymorphic_map = None
+ """A mapping of "polymorphic identity" identifiers mapped to
+ :class:`_orm.Mapper` instances, within an inheritance scenario.
+
+ The identifiers can be of any type which is comparable to the
+ type of column represented by :attr:`_orm.Mapper.polymorphic_on`.
+
+ An inheritance chain of mappers will all reference the same
+ polymorphic map object. The object is used to correlate incoming
+ result rows to target mappers.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ polymorphic_identity = None
+ """Represent an identifier which is matched against the
+ :attr:`_orm.Mapper.polymorphic_on` column during result row loading.
+
+ Used only with inheritance, this object can be of any type which is
+ comparable to the type of column represented by
+ :attr:`_orm.Mapper.polymorphic_on`.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ base_mapper = None
+ """The base-most :class:`_orm.Mapper` in an inheritance chain.
+
+ In a non-inheriting scenario, this attribute will always be this
+ :class:`_orm.Mapper`. In an inheritance scenario, it references
+ the :class:`_orm.Mapper` which is parent to all other :class:`_orm.Mapper`
+ objects in the inheritance chain.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ columns = None
+ """A collection of :class:`_schema.Column` or other scalar expression
+ objects maintained by this :class:`_orm.Mapper`.
+
+ The collection behaves the same as that of the ``c`` attribute on
+ any :class:`_schema.Table` object,
+ except that only those columns included in
+ this mapping are present, and are keyed based on the attribute name
+ defined in the mapping, not necessarily the ``key`` attribute of the
+ :class:`_schema.Column` itself. Additionally, scalar expressions mapped
+ by :func:`.column_property` are also present here.
+
+ This is a *read only* attribute determined during mapper construction.
+ Behavior is undefined if directly modified.
+
+ """
+
+ validators = None
+ """An immutable dictionary of attributes which have been decorated
+ using the :func:`_orm.validates` decorator.
+
+ The dictionary contains string attribute names as keys
+ mapped to the actual validation method.
+
+ """
+
+ c = None
+ """A synonym for :attr:`_orm.Mapper.columns`."""
+
+ @property
+ @util.deprecated("1.3", "Use .persist_selectable")
+ def mapped_table(self):
+ return self.persist_selectable
+
+ @util.memoized_property
+ def _path_registry(self):
+ return PathRegistry.per_mapper(self)
+
+ def _configure_inheritance(self):
+ """Configure settings related to inheriting and/or inherited mappers
+ being present."""
+
+ # a set of all mappers which inherit from this one.
+ self._inheriting_mappers = util.WeakSequence()
+
+ if self.inherits:
+ if isinstance(self.inherits, type):
+ self.inherits = class_mapper(self.inherits, configure=False)
+ if not issubclass(self.class_, self.inherits.class_):
+ raise sa_exc.ArgumentError(
+ "Class '%s' does not inherit from '%s'"
+ % (self.class_.__name__, self.inherits.class_.__name__)
+ )
+
+ self.dispatch._update(self.inherits.dispatch)
+
+ if self.non_primary != self.inherits.non_primary:
+ np = "primary" if not self.non_primary else "non-primary"
+ raise sa_exc.ArgumentError(
+ "Inheritance of %s mapper for class '%s' is "
+ "only allowed from a %s mapper"
+ % (np, self.class_.__name__, np)
+ )
+ # inherit_condition is optional.
+ if self.local_table is None:
+ self.local_table = self.inherits.local_table
+ self.persist_selectable = self.inherits.persist_selectable
+ self.single = True
+ elif self.local_table is not self.inherits.local_table:
+ if self.concrete:
+ self.persist_selectable = self.local_table
+ for mapper in self.iterate_to_root():
+ if mapper.polymorphic_on is not None:
+ mapper._requires_row_aliasing = True
+ else:
+ if self.inherit_condition is None:
+ # figure out inherit condition from our table to the
+ # immediate table of the inherited mapper, not its
+ # full table which could pull in other stuff we don't
+ # want (allows test/inheritance.InheritTest4 to pass)
+ try:
+ self.inherit_condition = sql_util.join_condition(
+ self.inherits.local_table, self.local_table
+ )
+ except sa_exc.NoForeignKeysError as nfe:
+ assert self.inherits.local_table is not None
+ assert self.local_table is not None
+ util.raise_(
+ sa_exc.NoForeignKeysError(
+ "Can't determine the inherit condition "
+ "between inherited table '%s' and "
+ "inheriting "
+ "table '%s'; tables have no "
+ "foreign key relationships established. "
+ "Please ensure the inheriting table has "
+ "a foreign key relationship to the "
+ "inherited "
+ "table, or provide an "
+ "'on clause' using "
+ "the 'inherit_condition' mapper argument."
+ % (
+ self.inherits.local_table.description,
+ self.local_table.description,
+ )
+ ),
+ replace_context=nfe,
+ )
+ except sa_exc.AmbiguousForeignKeysError as afe:
+ assert self.inherits.local_table is not None
+ assert self.local_table is not None
+ util.raise_(
+ sa_exc.AmbiguousForeignKeysError(
+ "Can't determine the inherit condition "
+ "between inherited table '%s' and "
+ "inheriting "
+ "table '%s'; tables have more than one "
+ "foreign key relationship established. "
+ "Please specify the 'on clause' using "
+ "the 'inherit_condition' mapper argument."
+ % (
+ self.inherits.local_table.description,
+ self.local_table.description,
+ )
+ ),
+ replace_context=afe,
+ )
+ self.persist_selectable = sql.join(
+ self.inherits.persist_selectable,
+ self.local_table,
+ self.inherit_condition,
+ )
+
+ fks = util.to_set(self.inherit_foreign_keys)
+ self._inherits_equated_pairs = sql_util.criterion_as_pairs(
+ self.persist_selectable.onclause,
+ consider_as_foreign_keys=fks,
+ )
+ else:
+ self.persist_selectable = self.local_table
+
+ if self.polymorphic_identity is not None and not self.concrete:
+ self._identity_class = self.inherits._identity_class
+ else:
+ self._identity_class = self.class_
+
+ if self.version_id_col is None:
+ self.version_id_col = self.inherits.version_id_col
+ self.version_id_generator = self.inherits.version_id_generator
+ elif (
+ self.inherits.version_id_col is not None
+ and self.version_id_col is not self.inherits.version_id_col
+ ):
+ util.warn(
+ "Inheriting version_id_col '%s' does not match inherited "
+ "version_id_col '%s' and will not automatically populate "
+ "the inherited versioning column. "
+ "version_id_col should only be specified on "
+ "the base-most mapper that includes versioning."
+ % (
+ self.version_id_col.description,
+ self.inherits.version_id_col.description,
+ )
+ )
+
+ self.polymorphic_map = self.inherits.polymorphic_map
+ self.batch = self.inherits.batch
+ self.inherits._inheriting_mappers.append(self)
+ self.base_mapper = self.inherits.base_mapper
+ self.passive_updates = self.inherits.passive_updates
+ self.passive_deletes = (
+ self.inherits.passive_deletes or self.passive_deletes
+ )
+ self._all_tables = self.inherits._all_tables
+
+ if self.polymorphic_identity is not None:
+ if self.polymorphic_identity in self.polymorphic_map:
+ util.warn(
+ "Reassigning polymorphic association for identity %r "
+ "from %r to %r: Check for duplicate use of %r as "
+ "value for polymorphic_identity."
+ % (
+ self.polymorphic_identity,
+ self.polymorphic_map[self.polymorphic_identity],
+ self,
+ self.polymorphic_identity,
+ )
+ )
+ self.polymorphic_map[self.polymorphic_identity] = self
+
+ if self.polymorphic_load and self.concrete:
+ raise sa_exc.ArgumentError(
+ "polymorphic_load is not currently supported "
+ "with concrete table inheritance"
+ )
+ if self.polymorphic_load == "inline":
+ self.inherits._add_with_polymorphic_subclass(self)
+ elif self.polymorphic_load == "selectin":
+ pass
+ elif self.polymorphic_load is not None:
+ raise sa_exc.ArgumentError(
+ "unknown argument for polymorphic_load: %r"
+ % self.polymorphic_load
+ )
+
+ else:
+ self._all_tables = set()
+ self.base_mapper = self
+ self.persist_selectable = self.local_table
+ if self.polymorphic_identity is not None:
+ self.polymorphic_map[self.polymorphic_identity] = self
+ self._identity_class = self.class_
+
+ if self.persist_selectable is None:
+ raise sa_exc.ArgumentError(
+ "Mapper '%s' does not have a persist_selectable specified."
+ % self
+ )
+
+ def _set_with_polymorphic(self, with_polymorphic):
+ if with_polymorphic == "*":
+ self.with_polymorphic = ("*", None)
+ elif isinstance(with_polymorphic, (tuple, list)):
+ if isinstance(
+ with_polymorphic[0], util.string_types + (tuple, list)
+ ):
+ self.with_polymorphic = with_polymorphic
+ else:
+ self.with_polymorphic = (with_polymorphic, None)
+ elif with_polymorphic is not None:
+ raise sa_exc.ArgumentError("Invalid setting for with_polymorphic")
+ else:
+ self.with_polymorphic = None
+
+ if self.with_polymorphic and self.with_polymorphic[1] is not None:
+ self.with_polymorphic = (
+ self.with_polymorphic[0],
+ coercions.expect(
+ roles.StrictFromClauseRole,
+ self.with_polymorphic[1],
+ allow_select=True,
+ ),
+ )
+
+ if self.configured:
+ self._expire_memoizations()
+
+ def _add_with_polymorphic_subclass(self, mapper):
+ subcl = mapper.class_
+ if self.with_polymorphic is None:
+ self._set_with_polymorphic((subcl,))
+ elif self.with_polymorphic[0] != "*":
+ self._set_with_polymorphic(
+ (self.with_polymorphic[0] + (subcl,), self.with_polymorphic[1])
+ )
+
+ def _set_concrete_base(self, mapper):
+ """Set the given :class:`_orm.Mapper` as the 'inherits' for this
+ :class:`_orm.Mapper`, assuming this :class:`_orm.Mapper` is concrete
+ and does not already have an inherits."""
+
+ assert self.concrete
+ assert not self.inherits
+ assert isinstance(mapper, Mapper)
+ self.inherits = mapper
+ self.inherits.polymorphic_map.update(self.polymorphic_map)
+ self.polymorphic_map = self.inherits.polymorphic_map
+ for mapper in self.iterate_to_root():
+ if mapper.polymorphic_on is not None:
+ mapper._requires_row_aliasing = True
+ self.batch = self.inherits.batch
+ for mp in self.self_and_descendants:
+ mp.base_mapper = self.inherits.base_mapper
+ self.inherits._inheriting_mappers.append(self)
+ self.passive_updates = self.inherits.passive_updates
+ self._all_tables = self.inherits._all_tables
+
+ for key, prop in mapper._props.items():
+ if key not in self._props and not self._should_exclude(
+ key, key, local=False, column=None
+ ):
+ self._adapt_inherited_property(key, prop, False)
+
+ def _set_polymorphic_on(self, polymorphic_on):
+ self.polymorphic_on = polymorphic_on
+ self._configure_polymorphic_setter(True)
+
+ def _configure_class_instrumentation(self):
+ """If this mapper is to be a primary mapper (i.e. the
+ non_primary flag is not set), associate this Mapper with the
+ given class and entity name.
+
+ Subsequent calls to ``class_mapper()`` for the ``class_`` / ``entity``
+ name combination will return this mapper. Also decorate the
+ `__init__` method on the mapped class to include optional
+ auto-session attachment logic.
+
+ """
+
+ # we expect that declarative has applied the class manager
+ # already and set up a registry. if this is None,
+ # we will emit a deprecation warning below when we also see that
+ # it has no registry.
+ manager = attributes.manager_of_class(self.class_)
+
+ if self.non_primary:
+ if not manager or not manager.is_mapped:
+ raise sa_exc.InvalidRequestError(
+ "Class %s has no primary mapper configured. Configure "
+ "a primary mapper first before setting up a non primary "
+ "Mapper." % self.class_
+ )
+ self.class_manager = manager
+ self.registry = manager.registry
+ self._identity_class = manager.mapper._identity_class
+ manager.registry._add_non_primary_mapper(self)
+ return
+
+ if manager is not None:
+ assert manager.class_ is self.class_
+ if manager.is_mapped:
+ # changed in #7579:
+ # this message is defined in two places as of this change,
+ # also in decl_api -> _add_manager(). in 2.0, this codepath
+ # is removed as any calls to mapper() / Mapper without
+ # the registry setting up first will be rejected.
+ raise sa_exc.ArgumentError(
+ "Class '%s' already has a primary mapper defined. "
+ % self.class_
+ )
+ # else:
+ # a ClassManager may already exist as
+ # ClassManager.instrument_attribute() creates
+ # new managers for each subclass if they don't yet exist.
+
+ self.dispatch.instrument_class(self, self.class_)
+
+ # this invokes the class_instrument event and sets up
+ # the __init__ method. documented behavior is that this must
+ # occur after the instrument_class event above.
+ # yes two events with the same two words reversed and different APIs.
+ # :(
+
+ manager = instrumentation.register_class(
+ self.class_,
+ mapper=self,
+ expired_attribute_loader=util.partial(
+ loading.load_scalar_attributes, self
+ ),
+ # finalize flag means instrument the __init__ method
+ # and call the class_instrument event
+ finalize=True,
+ )
+
+ if not manager.registry:
+ util.warn_deprecated_20(
+ "Calling the mapper() function directly outside of a "
+ "declarative registry is deprecated."
+ " Please use the sqlalchemy.orm.registry.map_imperatively() "
+ "function for a classical mapping."
+ )
+ assert _legacy_registry is not None
+ _legacy_registry._add_manager(manager)
+
+ self.class_manager = manager
+ self.registry = manager.registry
+
+ # The remaining members can be added by any mapper,
+ # e_name None or not.
+ if manager.mapper is None:
+ return
+
+ event.listen(manager, "init", _event_on_init, raw=True)
+
+ for key, method in util.iterate_attributes(self.class_):
+ if key == "__init__" and hasattr(method, "_sa_original_init"):
+ method = method._sa_original_init
+ if hasattr(method, "__func__"):
+ method = method.__func__
+ if callable(method):
+ if hasattr(method, "__sa_reconstructor__"):
+ self._reconstructor = method
+ event.listen(manager, "load", _event_on_load, raw=True)
+ elif hasattr(method, "__sa_validators__"):
+ validation_opts = method.__sa_validation_opts__
+ for name in method.__sa_validators__:
+ if name in self.validators:
+ raise sa_exc.InvalidRequestError(
+ "A validation function for mapped "
+ "attribute %r on mapper %s already exists."
+ % (name, self)
+ )
+ self.validators = self.validators.union(
+ {name: (method, validation_opts)}
+ )
+
+ def _set_dispose_flags(self):
+ self.configured = True
+ self._ready_for_configure = True
+ self._dispose_called = True
+
+ self.__dict__.pop("_configure_failed", None)
+
+ def _configure_pks(self):
+ self.tables = sql_util.find_tables(self.persist_selectable)
+
+ self._pks_by_table = {}
+ self._cols_by_table = {}
+
+ all_cols = util.column_set(
+ chain(*[col.proxy_set for col in self._columntoproperty])
+ )
+
+ pk_cols = util.column_set(c for c in all_cols if c.primary_key)
+
+ # identify primary key columns which are also mapped by this mapper.
+ tables = set(self.tables + [self.persist_selectable])
+ self._all_tables.update(tables)
+ for t in tables:
+ if t.primary_key and pk_cols.issuperset(t.primary_key):
+ # ordering is important since it determines the ordering of
+ # mapper.primary_key (and therefore query.get())
+ self._pks_by_table[t] = util.ordered_column_set(
+ t.primary_key
+ ).intersection(pk_cols)
+ self._cols_by_table[t] = util.ordered_column_set(t.c).intersection(
+ all_cols
+ )
+
+ # if explicit PK argument sent, add those columns to the
+ # primary key mappings
+ if self._primary_key_argument:
+ for k in self._primary_key_argument:
+ if k.table not in self._pks_by_table:
+ self._pks_by_table[k.table] = util.OrderedSet()
+ self._pks_by_table[k.table].add(k)
+
+ # otherwise, see that we got a full PK for the mapped table
+ elif (
+ self.persist_selectable not in self._pks_by_table
+ or len(self._pks_by_table[self.persist_selectable]) == 0
+ ):
+ raise sa_exc.ArgumentError(
+ "Mapper %s could not assemble any primary "
+ "key columns for mapped table '%s'"
+ % (self, self.persist_selectable.description)
+ )
+ elif self.local_table not in self._pks_by_table and isinstance(
+ self.local_table, schema.Table
+ ):
+ util.warn(
+ "Could not assemble any primary "
+ "keys for locally mapped table '%s' - "
+ "no rows will be persisted in this Table."
+ % self.local_table.description
+ )
+
+ if (
+ self.inherits
+ and not self.concrete
+ and not self._primary_key_argument
+ ):
+ # if inheriting, the "primary key" for this mapper is
+ # that of the inheriting (unless concrete or explicit)
+ self.primary_key = self.inherits.primary_key
+ else:
+ # determine primary key from argument or persist_selectable pks
+ if self._primary_key_argument:
+ primary_key = [
+ self.persist_selectable.corresponding_column(c)
+ for c in self._primary_key_argument
+ ]
+ else:
+ # if heuristically determined PKs, reduce to the minimal set
+ # of columns by eliminating FK->PK pairs for a multi-table
+ # expression. May over-reduce for some kinds of UNIONs
+ # / CTEs; use explicit PK argument for these special cases
+ primary_key = sql_util.reduce_columns(
+ self._pks_by_table[self.persist_selectable],
+ ignore_nonexistent_tables=True,
+ )
+
+ if len(primary_key) == 0:
+ raise sa_exc.ArgumentError(
+ "Mapper %s could not assemble any primary "
+ "key columns for mapped table '%s'"
+ % (self, self.persist_selectable.description)
+ )
+
+ self.primary_key = tuple(primary_key)
+ self._log("Identified primary key columns: %s", primary_key)
+
+ # determine cols that aren't expressed within our tables; mark these
+ # as "read only" properties which are refreshed upon INSERT/UPDATE
+ self._readonly_props = set(
+ self._columntoproperty[col]
+ for col in self._columntoproperty
+ if self._columntoproperty[col] not in self._identity_key_props
+ and (
+ not hasattr(col, "table")
+ or col.table not in self._cols_by_table
+ )
+ )
+
+ def _configure_properties(self):
+
+ # TODO: consider using DedupeColumnCollection
+ self.columns = self.c = sql_base.ColumnCollection()
+
+ # object attribute names mapped to MapperProperty objects
+ self._props = util.OrderedDict()
+
+ # table columns mapped to MapperProperty
+ self._columntoproperty = _ColumnMapping(self)
+
+ # load custom properties
+ if self._init_properties:
+ for key, prop in self._init_properties.items():
+ self._configure_property(key, prop, False)
+
+ # pull properties from the inherited mapper if any.
+ if self.inherits:
+ for key, prop in self.inherits._props.items():
+ if key not in self._props and not self._should_exclude(
+ key, key, local=False, column=None
+ ):
+ self._adapt_inherited_property(key, prop, False)
+
+ # create properties for each column in the mapped table,
+ # for those columns which don't already map to a property
+ for column in self.persist_selectable.columns:
+ if column in self._columntoproperty:
+ continue
+
+ column_key = (self.column_prefix or "") + column.key
+
+ if self._should_exclude(
+ column.key,
+ column_key,
+ local=self.local_table.c.contains_column(column),
+ column=column,
+ ):
+ continue
+
+ # adjust the "key" used for this column to that
+ # of the inheriting mapper
+ for mapper in self.iterate_to_root():
+ if column in mapper._columntoproperty:
+ column_key = mapper._columntoproperty[column].key
+
+ self._configure_property(
+ column_key, column, init=False, setparent=True
+ )
+
+ def _configure_polymorphic_setter(self, init=False):
+ """Configure an attribute on the mapper representing the
+ 'polymorphic_on' column, if applicable, and not
+ already generated by _configure_properties (which is typical).
+
+ Also create a setter function which will assign this
+ attribute to the value of the 'polymorphic_identity'
+ upon instance construction, also if applicable. This
+ routine will run when an instance is created.
+
+ """
+ setter = False
+
+ if self.polymorphic_on is not None:
+ setter = True
+
+ if isinstance(self.polymorphic_on, util.string_types):
+ # polymorphic_on specified as a string - link
+ # it to mapped ColumnProperty
+ try:
+ self.polymorphic_on = self._props[self.polymorphic_on]
+ except KeyError as err:
+ util.raise_(
+ sa_exc.ArgumentError(
+ "Can't determine polymorphic_on "
+ "value '%s' - no attribute is "
+ "mapped to this name." % self.polymorphic_on
+ ),
+ replace_context=err,
+ )
+
+ if self.polymorphic_on in self._columntoproperty:
+ # polymorphic_on is a column that is already mapped
+ # to a ColumnProperty
+ prop = self._columntoproperty[self.polymorphic_on]
+ elif isinstance(self.polymorphic_on, MapperProperty):
+ # polymorphic_on is directly a MapperProperty,
+ # ensure it's a ColumnProperty
+ if not isinstance(
+ self.polymorphic_on, properties.ColumnProperty
+ ):
+ raise sa_exc.ArgumentError(
+ "Only direct column-mapped "
+ "property or SQL expression "
+ "can be passed for polymorphic_on"
+ )
+ prop = self.polymorphic_on
+ else:
+ # polymorphic_on is a Column or SQL expression and
+ # doesn't appear to be mapped. this means it can be 1.
+ # only present in the with_polymorphic selectable or
+ # 2. a totally standalone SQL expression which we'd
+ # hope is compatible with this mapper's persist_selectable
+ col = self.persist_selectable.corresponding_column(
+ self.polymorphic_on
+ )
+ if col is None:
+ # polymorphic_on doesn't derive from any
+ # column/expression that's present in the mapped
+ # table. we will make a "hidden" ColumnProperty
+ # for it. Just check that if it's directly a
+ # schema.Column and we have with_polymorphic, it's
+ # likely a user error if the schema.Column isn't
+ # represented somehow in either persist_selectable or
+ # with_polymorphic. Otherwise as of 0.7.4 we
+ # just go with it and assume the user wants it
+ # that way (i.e. a CASE statement)
+ setter = False
+ instrument = False
+ col = self.polymorphic_on
+ if isinstance(col, schema.Column) and (
+ self.with_polymorphic is None
+ or self.with_polymorphic[1].corresponding_column(col)
+ is None
+ ):
+ raise sa_exc.InvalidRequestError(
+ "Could not map polymorphic_on column "
+ "'%s' to the mapped table - polymorphic "
+ "loads will not function properly"
+ % col.description
+ )
+ else:
+ # column/expression that polymorphic_on derives from
+ # is present in our mapped table
+ # and is probably mapped, but polymorphic_on itself
+ # is not. This happens when
+ # the polymorphic_on is only directly present in the
+ # with_polymorphic selectable, as when using
+ # polymorphic_union.
+ # we'll make a separate ColumnProperty for it.
+ instrument = True
+ key = getattr(col, "key", None)
+ if key:
+ if self._should_exclude(col.key, col.key, False, col):
+ raise sa_exc.InvalidRequestError(
+ "Cannot exclude or override the "
+ "discriminator column %r" % col.key
+ )
+ else:
+ self.polymorphic_on = col = col.label("_sa_polymorphic_on")
+ key = col.key
+
+ prop = properties.ColumnProperty(col, _instrument=instrument)
+ self._configure_property(key, prop, init=init, setparent=True)
+
+ # the actual polymorphic_on should be the first public-facing
+ # column in the property
+ self.polymorphic_on = prop.columns[0]
+ polymorphic_key = prop.key
+
+ else:
+ # no polymorphic_on was set.
+ # check inheriting mappers for one.
+ for mapper in self.iterate_to_root():
+ # determine if polymorphic_on of the parent
+ # should be propagated here. If the col
+ # is present in our mapped table, or if our mapped
+ # table is the same as the parent (i.e. single table
+ # inheritance), we can use it
+ if mapper.polymorphic_on is not None:
+ if self.persist_selectable is mapper.persist_selectable:
+ self.polymorphic_on = mapper.polymorphic_on
+ else:
+ self.polymorphic_on = (
+ self.persist_selectable
+ ).corresponding_column(mapper.polymorphic_on)
+ # we can use the parent mapper's _set_polymorphic_identity
+ # directly; it ensures the polymorphic_identity of the
+ # instance's mapper is used, so it is portable to subclasses.
+ if self.polymorphic_on is not None:
+ self._set_polymorphic_identity = (
+ mapper._set_polymorphic_identity
+ )
+ self._validate_polymorphic_identity = (
+ mapper._validate_polymorphic_identity
+ )
+ else:
+ self._set_polymorphic_identity = None
+ return
+
+ if setter:
+
+ def _set_polymorphic_identity(state):
+ dict_ = state.dict
+ state.get_impl(polymorphic_key).set(
+ state,
+ dict_,
+ state.manager.mapper.polymorphic_identity,
+ None,
+ )
+
+ def _validate_polymorphic_identity(mapper, state, dict_):
+ if (
+ polymorphic_key in dict_
+ and dict_[polymorphic_key]
+ not in mapper._acceptable_polymorphic_identities
+ ):
+ util.warn_limited(
+ "Flushing object %s with "
+ "incompatible polymorphic identity %r; the "
+ "object may not refresh and/or load correctly",
+ (state_str(state), dict_[polymorphic_key]),
+ )
+
+ self._set_polymorphic_identity = _set_polymorphic_identity
+ self._validate_polymorphic_identity = (
+ _validate_polymorphic_identity
+ )
+ else:
+ self._set_polymorphic_identity = None
+
+ _validate_polymorphic_identity = None
+
+ @HasMemoized.memoized_attribute
+ def _version_id_prop(self):
+ if self.version_id_col is not None:
+ return self._columntoproperty[self.version_id_col]
+ else:
+ return None
+
+ @HasMemoized.memoized_attribute
+ def _acceptable_polymorphic_identities(self):
+ identities = set()
+
+ stack = deque([self])
+ while stack:
+ item = stack.popleft()
+ if item.persist_selectable is self.persist_selectable:
+ identities.add(item.polymorphic_identity)
+ stack.extend(item._inheriting_mappers)
+
+ return identities
+
+ @HasMemoized.memoized_attribute
+ def _prop_set(self):
+ return frozenset(self._props.values())
+
+ @util.preload_module("sqlalchemy.orm.descriptor_props")
+ def _adapt_inherited_property(self, key, prop, init):
+ descriptor_props = util.preloaded.orm_descriptor_props
+
+ if not self.concrete:
+ self._configure_property(key, prop, init=False, setparent=False)
+ elif key not in self._props:
+ # determine if the class implements this attribute; if not,
+ # or if it is implemented by the attribute that is handling the
+ # given superclass-mapped property, then we need to report that we
+ # can't use this at the instance level since we are a concrete
+ # mapper and we don't map this. don't trip user-defined
+ # descriptors that might have side effects when invoked.
+ implementing_attribute = self.class_manager._get_class_attr_mro(
+ key, prop
+ )
+ if implementing_attribute is prop or (
+ isinstance(
+ implementing_attribute, attributes.InstrumentedAttribute
+ )
+ and implementing_attribute._parententity is prop.parent
+ ):
+ self._configure_property(
+ key,
+ descriptor_props.ConcreteInheritedProperty(),
+ init=init,
+ setparent=True,
+ )
+
+ @util.preload_module("sqlalchemy.orm.descriptor_props")
+ def _configure_property(self, key, prop, init=True, setparent=True):
+ descriptor_props = util.preloaded.orm_descriptor_props
+ self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
+
+ if not isinstance(prop, MapperProperty):
+ prop = self._property_from_column(key, prop)
+
+ if isinstance(prop, properties.ColumnProperty):
+ col = self.persist_selectable.corresponding_column(prop.columns[0])
+
+ # if the column is not present in the mapped table,
+ # test if a column has been added after the fact to the
+ # parent table (or their parent, etc.) [ticket:1570]
+ if col is None and self.inherits:
+ path = [self]
+ for m in self.inherits.iterate_to_root():
+ col = m.local_table.corresponding_column(prop.columns[0])
+ if col is not None:
+ for m2 in path:
+ m2.persist_selectable._refresh_for_new_column(col)
+ col = self.persist_selectable.corresponding_column(
+ prop.columns[0]
+ )
+ break
+ path.append(m)
+
+ # subquery expression, column not present in the mapped
+ # selectable.
+ if col is None:
+ col = prop.columns[0]
+
+ # column is coming in after _readonly_props was
+ # initialized; check for 'readonly'
+ if hasattr(self, "_readonly_props") and (
+ not hasattr(col, "table")
+ or col.table not in self._cols_by_table
+ ):
+ self._readonly_props.add(prop)
+
+ else:
+ # if column is coming in after _cols_by_table was
+ # initialized, ensure the col is in the right set
+ if (
+ hasattr(self, "_cols_by_table")
+ and col.table in self._cols_by_table
+ and col not in self._cols_by_table[col.table]
+ ):
+ self._cols_by_table[col.table].add(col)
+
+ # if this properties.ColumnProperty represents the "polymorphic
+ # discriminator" column, mark it. We'll need this when rendering
+ # columns in SELECT statements.
+ if not hasattr(prop, "_is_polymorphic_discriminator"):
+ prop._is_polymorphic_discriminator = (
+ col is self.polymorphic_on
+ or prop.columns[0] is self.polymorphic_on
+ )
+
+ if isinstance(col, expression.Label):
+ # new in 1.4, get column property against expressions
+ # to be addressable in subqueries
+ col.key = col._tq_key_label = key
+
+ self.columns.add(col, key)
+ for col in prop.columns:
+ for col in col.proxy_set:
+ self._columntoproperty[col] = prop
+
+ prop.key = key
+
+ if setparent:
+ prop.set_parent(self, init)
+
+ if key in self._props and getattr(
+ self._props[key], "_mapped_by_synonym", False
+ ):
+ syn = self._props[key]._mapped_by_synonym
+ raise sa_exc.ArgumentError(
+ "Can't call map_column=True for synonym %r=%r, "
+ "a ColumnProperty already exists keyed to the name "
+ "%r for column %r" % (syn, key, key, syn)
+ )
+
+ if (
+ key in self._props
+ and not isinstance(prop, properties.ColumnProperty)
+ and not isinstance(
+ self._props[key],
+ (
+ properties.ColumnProperty,
+ descriptor_props.ConcreteInheritedProperty,
+ ),
+ )
+ ):
+ util.warn(
+ "Property %s on %s being replaced with new "
+ "property %s; the old property will be discarded"
+ % (self._props[key], self, prop)
+ )
+ oldprop = self._props[key]
+ self._path_registry.pop(oldprop, None)
+
+ self._props[key] = prop
+
+ if not self.non_primary:
+ prop.instrument_class(self)
+
+ for mapper in self._inheriting_mappers:
+ mapper._adapt_inherited_property(key, prop, init)
+
+ if init:
+ prop.init()
+ prop.post_instrument_class(self)
+
+ if self.configured:
+ self._expire_memoizations()
+
+ @util.preload_module("sqlalchemy.orm.descriptor_props")
+ def _property_from_column(self, key, prop):
+ """generate/update a :class:`.ColumnProperty` given a
+ :class:`_schema.Column` object."""
+ descriptor_props = util.preloaded.orm_descriptor_props
+ # we were passed a Column or a list of Columns;
+ # generate a properties.ColumnProperty
+ columns = util.to_list(prop)
+ column = columns[0]
+ assert isinstance(column, expression.ColumnElement)
+
+ prop = self._props.get(key, None)
+
+ if isinstance(prop, properties.ColumnProperty):
+ if (
+ (
+ not self._inherits_equated_pairs
+ or (prop.columns[0], column)
+ not in self._inherits_equated_pairs
+ )
+ and not prop.columns[0].shares_lineage(column)
+ and prop.columns[0] is not self.version_id_col
+ and column is not self.version_id_col
+ ):
+ warn_only = prop.parent is not self
+ msg = (
+ "Implicitly combining column %s with column "
+ "%s under attribute '%s'. Please configure one "
+ "or more attributes for these same-named columns "
+ "explicitly." % (prop.columns[-1], column, key)
+ )
+ if warn_only:
+ util.warn(msg)
+ else:
+ raise sa_exc.InvalidRequestError(msg)
+
+ # existing properties.ColumnProperty from an inheriting
+ # mapper. make a copy and append our column to it
+ prop = prop.copy()
+ prop.columns.insert(0, column)
+ self._log(
+ "inserting column to existing list "
+ "in properties.ColumnProperty %s" % (key)
+ )
+ return prop
+ elif prop is None or isinstance(
+ prop, descriptor_props.ConcreteInheritedProperty
+ ):
+ mapped_column = []
+ for c in columns:
+ mc = self.persist_selectable.corresponding_column(c)
+ if mc is None:
+ mc = self.local_table.corresponding_column(c)
+ if mc is not None:
+ # if the column is in the local table but not the
+ # mapped table, this corresponds to adding a
+ # column after the fact to the local table.
+ # [ticket:1523]
+ self.persist_selectable._refresh_for_new_column(mc)
+ mc = self.persist_selectable.corresponding_column(c)
+ if mc is None:
+ raise sa_exc.ArgumentError(
+ "When configuring property '%s' on %s, "
+ "column '%s' is not represented in the mapper's "
+ "table. Use the `column_property()` function to "
+ "force this column to be mapped as a read-only "
+ "attribute." % (key, self, c)
+ )
+ mapped_column.append(mc)
+ return properties.ColumnProperty(*mapped_column)
+ else:
+ raise sa_exc.ArgumentError(
+ "WARNING: when configuring property '%s' on %s, "
+ "column '%s' conflicts with property '%r'. "
+ "To resolve this, map the column to the class under a "
+ "different name in the 'properties' dictionary. Or, "
+ "to remove all awareness of the column entirely "
+ "(including its availability as a foreign key), "
+ "use the 'include_properties' or 'exclude_properties' "
+ "mapper arguments to control specifically which table "
+ "columns get mapped." % (key, self, column.key, prop)
+ )
+
+ def _check_configure(self):
+ if self.registry._new_mappers:
+ _configure_registries({self.registry}, cascade=True)
+
+ def _post_configure_properties(self):
+ """Call the ``init()`` method on all ``MapperProperties``
+ attached to this mapper.
+
+ This is a deferred configuration step which is intended
+ to execute once all mappers have been constructed.
+
+ """
+
+ self._log("_post_configure_properties() started")
+        for key, prop in list(self._props.items()):
+ self._log("initialize prop %s", key)
+
+ if prop.parent is self and not prop._configure_started:
+ prop.init()
+
+ if prop._configure_finished:
+ prop.post_instrument_class(self)
+
+ self._log("_post_configure_properties() complete")
+ self.configured = True
+
+ def add_properties(self, dict_of_properties):
+ """Add the given dictionary of properties to this mapper,
+        using ``add_property``.
+
+ """
+ for key, value in dict_of_properties.items():
+ self.add_property(key, value)
+
+ def add_property(self, key, prop):
+ """Add an individual MapperProperty to this mapper.
+
+ If the mapper has not been configured yet, just adds the
+ property to the initial properties dictionary sent to the
+ constructor. If this Mapper has already been configured, then
+ the given MapperProperty is configured immediately.
+
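+        e.g., a minimal sketch, assuming hypothetical mapped classes
+        ``User`` and ``Address``::
+
+            from sqlalchemy import inspect
+            from sqlalchemy.orm import relationship
+
+            inspect(User).add_property(
+                "addresses", relationship(Address)
+            )
+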
+ """
+ self._init_properties[key] = prop
+ self._configure_property(key, prop, init=self.configured)
+
+ def _expire_memoizations(self):
+ for mapper in self.iterate_to_root():
+ mapper._reset_memoizations()
+
+ @property
+ def _log_desc(self):
+ return (
+ "("
+ + self.class_.__name__
+ + "|"
+ + (
+ self.local_table is not None
+ and self.local_table.description
+ or str(self.local_table)
+ )
+ + (self.non_primary and "|non-primary" or "")
+ + ")"
+ )
+
+ def _log(self, msg, *args):
+ self.logger.info("%s " + msg, *((self._log_desc,) + args))
+
+ def _log_debug(self, msg, *args):
+ self.logger.debug("%s " + msg, *((self._log_desc,) + args))
+
+ def __repr__(self):
+ return "<Mapper at 0x%x; %s>" % (id(self), self.class_.__name__)
+
+ def __str__(self):
+ return "mapped class %s%s->%s" % (
+ self.class_.__name__,
+ self.non_primary and " (non-primary)" or "",
+ self.local_table.description
+ if self.local_table is not None
+ else self.persist_selectable.description,
+ )
+
+ def _is_orphan(self, state):
+ orphan_possible = False
+ for mapper in self.iterate_to_root():
+ for (key, cls) in mapper._delete_orphans:
+ orphan_possible = True
+
+ has_parent = attributes.manager_of_class(cls).has_parent(
+ state, key, optimistic=state.has_identity
+ )
+
+ if self.legacy_is_orphan and has_parent:
+ return False
+ elif not self.legacy_is_orphan and not has_parent:
+ return True
+
+ if self.legacy_is_orphan:
+ return orphan_possible
+ else:
+ return False
+
+ def has_property(self, key):
+ return key in self._props
+
+ def get_property(self, key, _configure_mappers=True):
+ """return a MapperProperty associated with the given key."""
+
+ if _configure_mappers:
+ self._check_configure()
+
+ try:
+ return self._props[key]
+ except KeyError as err:
+ util.raise_(
+ sa_exc.InvalidRequestError(
+ "Mapper '%s' has no property '%s'" % (self, key)
+ ),
+ replace_context=err,
+ )
+
+ def get_property_by_column(self, column):
+ """Given a :class:`_schema.Column` object, return the
+ :class:`.MapperProperty` which maps this column."""
+
+ return self._columntoproperty[column]
+
+ @property
+ def iterate_properties(self):
+ """return an iterator of all MapperProperty objects."""
+
+ self._check_configure()
+ return iter(self._props.values())
+
+ def _mappers_from_spec(self, spec, selectable):
+ """given a with_polymorphic() argument, return the set of mappers it
+ represents.
+
+        Trims the list of mappers to just those represented within the given
+        selectable, if present.  This accommodates some legacy-style mappings.
+
+ """
+ if spec == "*":
+ mappers = list(self.self_and_descendants)
+ elif spec:
+ mappers = set()
+ for m in util.to_list(spec):
+ m = _class_to_mapper(m)
+ if not m.isa(self):
+ raise sa_exc.InvalidRequestError(
+ "%r does not inherit from %r" % (m, self)
+ )
+
+ if selectable is None:
+ mappers.update(m.iterate_to_root())
+ else:
+ mappers.add(m)
+ mappers = [m for m in self.self_and_descendants if m in mappers]
+ else:
+ mappers = []
+
+ if selectable is not None:
+ tables = set(
+ sql_util.find_tables(selectable, include_aliases=True)
+ )
+ mappers = [m for m in mappers if m.local_table in tables]
+ return mappers
+
+ def _selectable_from_mappers(self, mappers, innerjoin):
+ """given a list of mappers (assumed to be within this mapper's
+ inheritance hierarchy), construct an outerjoin amongst those mapper's
+ mapped tables.
+
+ """
+ from_obj = self.persist_selectable
+ for m in mappers:
+ if m is self:
+ continue
+ if m.concrete:
+ raise sa_exc.InvalidRequestError(
+ "'with_polymorphic()' requires 'selectable' argument "
+ "when concrete-inheriting mappers are used."
+ )
+ elif not m.single:
+ if innerjoin:
+ from_obj = from_obj.join(
+ m.local_table, m.inherit_condition
+ )
+ else:
+ from_obj = from_obj.outerjoin(
+ m.local_table, m.inherit_condition
+ )
+
+ return from_obj
+
+ @HasMemoized.memoized_attribute
+ def _single_table_criterion(self):
+ if self.single and self.inherits and self.polymorphic_on is not None:
+ return self.polymorphic_on._annotate(
+ {"parententity": self, "parentmapper": self}
+ ).in_(m.polymorphic_identity for m in self.self_and_descendants)
+ else:
+ return None
+
+ @HasMemoized.memoized_attribute
+ def _with_polymorphic_mappers(self):
+ self._check_configure()
+
+ if not self.with_polymorphic:
+ return []
+ return self._mappers_from_spec(*self.with_polymorphic)
+
+ @HasMemoized.memoized_attribute
+ def _post_inspect(self):
+ """This hook is invoked by attribute inspection.
+
+ E.g. when Query calls:
+
+ coercions.expect(roles.ColumnsClauseRole, ent, keep_inspect=True)
+
+        This allows the inspection process to run a configure-mappers hook.
+
+ """
+ self._check_configure()
+
+ @HasMemoized.memoized_attribute
+ def _with_polymorphic_selectable(self):
+ if not self.with_polymorphic:
+ return self.persist_selectable
+
+ spec, selectable = self.with_polymorphic
+ if selectable is not None:
+ return selectable
+ else:
+ return self._selectable_from_mappers(
+ self._mappers_from_spec(spec, selectable), False
+ )
+
+ with_polymorphic_mappers = _with_polymorphic_mappers
+ """The list of :class:`_orm.Mapper` objects included in the
+ default "polymorphic" query.
+
+ """
+
+ @HasMemoized.memoized_attribute
+ def _insert_cols_evaluating_none(self):
+ return dict(
+ (
+ table,
+ frozenset(
+ col for col in columns if col.type.should_evaluate_none
+ ),
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
+ @HasMemoized.memoized_attribute
+ def _insert_cols_as_none(self):
+ return dict(
+ (
+ table,
+ frozenset(
+ col.key
+ for col in columns
+ if not col.primary_key
+ and not col.server_default
+ and not col.default
+ and not col.type.should_evaluate_none
+ ),
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
+ @HasMemoized.memoized_attribute
+ def _propkey_to_col(self):
+ return dict(
+ (
+ table,
+ dict(
+ (self._columntoproperty[col].key, col) for col in columns
+ ),
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
+ @HasMemoized.memoized_attribute
+ def _pk_keys_by_table(self):
+ return dict(
+ (table, frozenset([col.key for col in pks]))
+ for table, pks in self._pks_by_table.items()
+ )
+
+ @HasMemoized.memoized_attribute
+ def _pk_attr_keys_by_table(self):
+ return dict(
+ (
+ table,
+ frozenset([self._columntoproperty[col].key for col in pks]),
+ )
+ for table, pks in self._pks_by_table.items()
+ )
+
+ @HasMemoized.memoized_attribute
+ def _server_default_cols(self):
+ return dict(
+ (
+ table,
+ frozenset(
+ [
+ col.key
+ for col in columns
+ if col.server_default is not None
+ ]
+ ),
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
+ @HasMemoized.memoized_attribute
+ def _server_default_plus_onupdate_propkeys(self):
+ result = set()
+
+ for table, columns in self._cols_by_table.items():
+ for col in columns:
+ if (
+ col.server_default is not None
+ or col.server_onupdate is not None
+ ) and col in self._columntoproperty:
+ result.add(self._columntoproperty[col].key)
+
+ return result
+
+ @HasMemoized.memoized_attribute
+ def _server_onupdate_default_cols(self):
+ return dict(
+ (
+ table,
+ frozenset(
+ [
+ col.key
+ for col in columns
+ if col.server_onupdate is not None
+ ]
+ ),
+ )
+ for table, columns in self._cols_by_table.items()
+ )
+
+ @HasMemoized.memoized_instancemethod
+ def __clause_element__(self):
+
+ annotations = {
+ "entity_namespace": self,
+ "parententity": self,
+ "parentmapper": self,
+ }
+ if self.persist_selectable is not self.local_table:
+ # joined table inheritance, with polymorphic selectable,
+ # etc.
+ annotations["dml_table"] = self.local_table._annotate(
+ {
+ "entity_namespace": self,
+ "parententity": self,
+ "parentmapper": self,
+ }
+ )._set_propagate_attrs(
+ {"compile_state_plugin": "orm", "plugin_subject": self}
+ )
+
+ return self.selectable._annotate(annotations)._set_propagate_attrs(
+ {"compile_state_plugin": "orm", "plugin_subject": self}
+ )
+
+ @util.memoized_property
+ def select_identity_token(self):
+ return (
+ expression.null()
+ ._annotate(
+ {
+ "entity_namespace": self,
+ "parententity": self,
+ "parentmapper": self,
+ "identity_token": True,
+ }
+ )
+ ._set_propagate_attrs(
+ {"compile_state_plugin": "orm", "plugin_subject": self}
+ )
+ )
+
+ @property
+ def selectable(self):
+ """The :class:`_schema.FromClause` construct this
+ :class:`_orm.Mapper` selects from by default.
+
+ Normally, this is equivalent to :attr:`.persist_selectable`, unless
+ the ``with_polymorphic`` feature is in use, in which case the
+ full "polymorphic" selectable is returned.
+
+ """
+ return self._with_polymorphic_selectable
+
+ def _with_polymorphic_args(
+ self, spec=None, selectable=False, innerjoin=False
+ ):
+ if selectable not in (None, False):
+ selectable = coercions.expect(
+ roles.StrictFromClauseRole, selectable, allow_select=True
+ )
+
+ if self.with_polymorphic:
+ if not spec:
+ spec = self.with_polymorphic[0]
+ if selectable is False:
+ selectable = self.with_polymorphic[1]
+ elif selectable is False:
+ selectable = None
+ mappers = self._mappers_from_spec(spec, selectable)
+ if selectable is not None:
+ return mappers, selectable
+ else:
+ return mappers, self._selectable_from_mappers(mappers, innerjoin)
+
+ @HasMemoized.memoized_attribute
+ def _polymorphic_properties(self):
+ return list(
+ self._iterate_polymorphic_properties(
+ self._with_polymorphic_mappers
+ )
+ )
+
+ @property
+ def _all_column_expressions(self):
+ poly_properties = self._polymorphic_properties
+ adapter = self._polymorphic_adapter
+
+ return [
+ adapter.columns[prop.columns[0]] if adapter else prop.columns[0]
+ for prop in poly_properties
+ if isinstance(prop, properties.ColumnProperty)
+ and prop._renders_in_subqueries
+ ]
+
+ def _columns_plus_keys(self, polymorphic_mappers=()):
+ if polymorphic_mappers:
+ poly_properties = self._iterate_polymorphic_properties(
+ polymorphic_mappers
+ )
+ else:
+ poly_properties = self._polymorphic_properties
+
+ return [
+ (prop.key, prop.columns[0])
+ for prop in poly_properties
+ if isinstance(prop, properties.ColumnProperty)
+ ]
+
+ @HasMemoized.memoized_attribute
+ def _polymorphic_adapter(self):
+ if self.with_polymorphic:
+ return sql_util.ColumnAdapter(
+ self.selectable, equivalents=self._equivalent_columns
+ )
+ else:
+ return None
+
+ def _iterate_polymorphic_properties(self, mappers=None):
+ """Return an iterator of MapperProperty objects which will render into
+ a SELECT."""
+ if mappers is None:
+ mappers = self._with_polymorphic_mappers
+
+ if not mappers:
+ for c in self.iterate_properties:
+ yield c
+ else:
+ # in the polymorphic case, filter out discriminator columns
+ # from other mappers, as these are sometimes dependent on that
+ # mapper's polymorphic selectable (which we don't want rendered)
+ for c in util.unique_list(
+ chain(
+ *[
+ list(mapper.iterate_properties)
+ for mapper in [self] + mappers
+ ]
+ )
+ ):
+ if getattr(c, "_is_polymorphic_discriminator", False) and (
+ self.polymorphic_on is None
+ or c.columns[0] is not self.polymorphic_on
+ ):
+ continue
+ yield c
+
+ @HasMemoized.memoized_attribute
+ def attrs(self):
+ """A namespace of all :class:`.MapperProperty` objects
+ associated this mapper.
+
+ This is an object that provides each property based on
+ its key name. For instance, the mapper for a
+ ``User`` class which has ``User.name`` attribute would
+ provide ``mapper.attrs.name``, which would be the
+ :class:`.ColumnProperty` representing the ``name``
+ column. The namespace object can also be iterated,
+ which would yield each :class:`.MapperProperty`.
+
+ :class:`_orm.Mapper` has several pre-filtered views
+ of this attribute which limit the types of properties
+ returned, including :attr:`.synonyms`, :attr:`.column_attrs`,
+ :attr:`.relationships`, and :attr:`.composites`.
+
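+        e.g., a minimal sketch, assuming a hypothetical mapped class
+        ``User`` with a ``name`` column::
+
+            from sqlalchemy import inspect
+
+            mapper = inspect(User)
+            name_prop = mapper.attrs["name"]  # the "name" ColumnProperty
+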
+ .. warning::
+
+ The :attr:`_orm.Mapper.attrs` accessor namespace is an
+ instance of :class:`.OrderedProperties`. This is
+ a dictionary-like object which includes a small number of
+ named methods such as :meth:`.OrderedProperties.items`
+ and :meth:`.OrderedProperties.values`. When
+ accessing attributes dynamically, favor using the dict-access
+ scheme, e.g. ``mapper.attrs[somename]`` over
+ ``getattr(mapper.attrs, somename)`` to avoid name collisions.
+
+ .. seealso::
+
+ :attr:`_orm.Mapper.all_orm_descriptors`
+
+ """
+
+ self._check_configure()
+ return util.ImmutableProperties(self._props)
+
+ @HasMemoized.memoized_attribute
+ def all_orm_descriptors(self):
+ """A namespace of all :class:`.InspectionAttr` attributes associated
+ with the mapped class.
+
+ These attributes are in all cases Python :term:`descriptors`
+ associated with the mapped class or its superclasses.
+
+ This namespace includes attributes that are mapped to the class
+ as well as attributes declared by extension modules.
+ It includes any Python descriptor type that inherits from
+ :class:`.InspectionAttr`. This includes
+ :class:`.QueryableAttribute`, as well as extension types such as
+ :class:`.hybrid_property`, :class:`.hybrid_method` and
+ :class:`.AssociationProxy`.
+
+ To distinguish between mapped attributes and extension attributes,
+ the attribute :attr:`.InspectionAttr.extension_type` will refer
+ to a constant that distinguishes between different extension types.
+
+ The sorting of the attributes is based on the following rules:
+
+ 1. Iterate through the class and its superclasses in order from
+ subclass to superclass (i.e. iterate through ``cls.__mro__``)
+
+ 2. For each class, yield the attributes in the order in which they
+ appear in ``__dict__``, with the exception of those in step
+ 3 below. In Python 3.6 and above this ordering will be the
+ same as that of the class' construction, with the exception
+ of attributes that were added after the fact by the application
+ or the mapper.
+
+ 3. If a certain attribute key is also in the superclass ``__dict__``,
+ then it's included in the iteration for that class, and not the
+ class in which it first appeared.
+
+ The above process produces an ordering that is deterministic in terms
+ of the order in which attributes were assigned to the class.
+
+ .. versionchanged:: 1.3.19 ensured deterministic ordering for
+ :meth:`_orm.Mapper.all_orm_descriptors`.
+
+ When dealing with a :class:`.QueryableAttribute`, the
+ :attr:`.QueryableAttribute.property` attribute refers to the
+ :class:`.MapperProperty` property, which is what you get when
+ referring to the collection of mapped properties via
+ :attr:`_orm.Mapper.attrs`.
+
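+        e.g., a sketch that lists each descriptor along with its
+        extension type, assuming a hypothetical mapped class ``User``::
+
+            from sqlalchemy import inspect
+
+            for key, attr in inspect(User).all_orm_descriptors.items():
+                print(key, attr.extension_type)
+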
+ .. warning::
+
+ The :attr:`_orm.Mapper.all_orm_descriptors`
+ accessor namespace is an
+ instance of :class:`.OrderedProperties`. This is
+ a dictionary-like object which includes a small number of
+ named methods such as :meth:`.OrderedProperties.items`
+ and :meth:`.OrderedProperties.values`. When
+ accessing attributes dynamically, favor using the dict-access
+ scheme, e.g. ``mapper.all_orm_descriptors[somename]`` over
+ ``getattr(mapper.all_orm_descriptors, somename)`` to avoid name
+ collisions.
+
+ .. seealso::
+
+ :attr:`_orm.Mapper.attrs`
+
+ """
+ return util.ImmutableProperties(
+ dict(self.class_manager._all_sqla_attributes())
+ )
+
+ @HasMemoized.memoized_attribute
+ @util.preload_module("sqlalchemy.orm.descriptor_props")
+ def synonyms(self):
+ """Return a namespace of all :class:`.SynonymProperty`
+ properties maintained by this :class:`_orm.Mapper`.
+
+ .. seealso::
+
+ :attr:`_orm.Mapper.attrs` - namespace of all
+ :class:`.MapperProperty`
+ objects.
+
+ """
+ descriptor_props = util.preloaded.orm_descriptor_props
+
+ return self._filter_properties(descriptor_props.SynonymProperty)
+
+ @property
+ def entity_namespace(self):
+ return self.class_
+
+ @HasMemoized.memoized_attribute
+ def column_attrs(self):
+ """Return a namespace of all :class:`.ColumnProperty`
+ properties maintained by this :class:`_orm.Mapper`.
+
+ .. seealso::
+
+ :attr:`_orm.Mapper.attrs` - namespace of all
+ :class:`.MapperProperty`
+ objects.
+
+ """
+ return self._filter_properties(properties.ColumnProperty)
+
+ @util.preload_module("sqlalchemy.orm.relationships")
+ @HasMemoized.memoized_attribute
+ def relationships(self):
+ """A namespace of all :class:`.RelationshipProperty` properties
+ maintained by this :class:`_orm.Mapper`.
+
+ .. warning::
+
+            The :attr:`_orm.Mapper.relationships` accessor namespace is an
+ instance of :class:`.OrderedProperties`. This is
+ a dictionary-like object which includes a small number of
+ named methods such as :meth:`.OrderedProperties.items`
+ and :meth:`.OrderedProperties.values`. When
+ accessing attributes dynamically, favor using the dict-access
+ scheme, e.g. ``mapper.relationships[somename]`` over
+ ``getattr(mapper.relationships, somename)`` to avoid name
+ collisions.
+
+ .. seealso::
+
+ :attr:`_orm.Mapper.attrs` - namespace of all
+ :class:`.MapperProperty`
+ objects.
+
+ """
+ return self._filter_properties(
+ util.preloaded.orm_relationships.RelationshipProperty
+ )
+
+ @HasMemoized.memoized_attribute
+ @util.preload_module("sqlalchemy.orm.descriptor_props")
+ def composites(self):
+ """Return a namespace of all :class:`.CompositeProperty`
+ properties maintained by this :class:`_orm.Mapper`.
+
+ .. seealso::
+
+ :attr:`_orm.Mapper.attrs` - namespace of all
+ :class:`.MapperProperty`
+ objects.
+
+ """
+ return self._filter_properties(
+ util.preloaded.orm_descriptor_props.CompositeProperty
+ )
+
+ def _filter_properties(self, type_):
+ self._check_configure()
+ return util.ImmutableProperties(
+ util.OrderedDict(
+ (k, v) for k, v in self._props.items() if isinstance(v, type_)
+ )
+ )
+
+ @HasMemoized.memoized_attribute
+ def _get_clause(self):
+ """create a "get clause" based on the primary key. this is used
+ by query.get() and many-to-one lazyloads to load this item
+ by primary key.
+
+ """
+ params = [
+ (
+ primary_key,
+ sql.bindparam("pk_%d" % idx, type_=primary_key.type),
+ )
+ for idx, primary_key in enumerate(self.primary_key, 1)
+ ]
+ return (
+ sql.and_(*[k == v for (k, v) in params]),
+ util.column_dict(params),
+ )
+
+ @HasMemoized.memoized_attribute
+ def _equivalent_columns(self):
+ """Create a map of all equivalent columns, based on
+ the determination of column pairs that are equated to
+ one another based on inherit condition. This is designed
+ to work with the queries that util.polymorphic_union
+ comes up with, which often don't include the columns from
+ the base table directly (including the subclass table columns
+ only).
+
+ The resulting structure is a dictionary of columns mapped
+ to lists of equivalent columns, e.g.::
+
+ {
+ tablea.col1:
+ {tableb.col1, tablec.col1},
+ tablea.col2:
+ {tabled.col2}
+ }
+
+ """
+ result = util.column_dict()
+
+ def visit_binary(binary):
+ if binary.operator == operators.eq:
+ if binary.left in result:
+ result[binary.left].add(binary.right)
+ else:
+ result[binary.left] = util.column_set((binary.right,))
+ if binary.right in result:
+ result[binary.right].add(binary.left)
+ else:
+ result[binary.right] = util.column_set((binary.left,))
+
+ for mapper in self.base_mapper.self_and_descendants:
+ if mapper.inherit_condition is not None:
+ visitors.traverse(
+ mapper.inherit_condition, {}, {"binary": visit_binary}
+ )
+
+ return result
+
+ def _is_userland_descriptor(self, assigned_name, obj):
+ if isinstance(
+ obj,
+ (
+ _MappedAttribute,
+ instrumentation.ClassManager,
+ expression.ColumnElement,
+ ),
+ ):
+ return False
+ else:
+ return assigned_name not in self._dataclass_fields
+
+ @HasMemoized.memoized_attribute
+ def _dataclass_fields(self):
+ return [f.name for f in util.dataclass_fields(self.class_)]
+
+ def _should_exclude(self, name, assigned_name, local, column):
+ """determine whether a particular property should be implicitly
+ present on the class.
+
+ This occurs when properties are propagated from an inherited class, or
+ are applied from the columns present in the mapped table.
+
+ """
+
+ # check for class-bound attributes and/or descriptors,
+ # either local or from an inherited class
+ # ignore dataclass field default values
+ if local:
+ if self.class_.__dict__.get(
+ assigned_name, None
+ ) is not None and self._is_userland_descriptor(
+ assigned_name, self.class_.__dict__[assigned_name]
+ ):
+ return True
+ else:
+ attr = self.class_manager._get_class_attr_mro(assigned_name, None)
+ if attr is not None and self._is_userland_descriptor(
+ assigned_name, attr
+ ):
+ return True
+
+ if (
+ self.include_properties is not None
+ and name not in self.include_properties
+ and (column is None or column not in self.include_properties)
+ ):
+ self._log("not including property %s" % (name))
+ return True
+
+ if self.exclude_properties is not None and (
+ name in self.exclude_properties
+ or (column is not None and column in self.exclude_properties)
+ ):
+ self._log("excluding property %s" % (name))
+ return True
+
+ return False
+
+ def common_parent(self, other):
+ """Return true if the given mapper shares a
+ common inherited parent as this mapper."""
+
+ return self.base_mapper is other.base_mapper
+
+ def is_sibling(self, other):
+ """return true if the other mapper is an inheriting sibling to this
+ one. common parent but different branch
+
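+        e.g., a minimal sketch, assuming hypothetical mapped classes
+        ``Manager`` and ``Engineer`` that both inherit from ``Employee``::
+
+            from sqlalchemy import inspect
+
+            inspect(Manager).is_sibling(inspect(Engineer))  # True
+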
+ """
+ return (
+ self.base_mapper is other.base_mapper
+ and not self.isa(other)
+ and not other.isa(self)
+ )
+
+ def _canload(self, state, allow_subtypes):
+ s = self.primary_mapper()
+ if self.polymorphic_on is not None or allow_subtypes:
+ return _state_mapper(state).isa(s)
+ else:
+ return _state_mapper(state) is s
+
+ def isa(self, other):
+ """Return True if the this mapper inherits from the given mapper."""
+
+ m = self
+ while m and m is not other:
+ m = m.inherits
+ return bool(m)
+
+ def iterate_to_root(self):
+ m = self
+ while m:
+ yield m
+ m = m.inherits
+
+ @HasMemoized.memoized_attribute
+ def self_and_descendants(self):
+ """The collection including this mapper and all descendant mappers.
+
+ This includes not just the immediately inheriting mappers but
+ all their inheriting mappers as well.
+
+ """
+ descendants = []
+ stack = deque([self])
+ while stack:
+ item = stack.popleft()
+ descendants.append(item)
+ stack.extend(item._inheriting_mappers)
+ return util.WeakSequence(descendants)
+
+ def polymorphic_iterator(self):
+ """Iterate through the collection including this mapper and
+ all descendant mappers.
+
+ This includes not just the immediately inheriting mappers but
+ all their inheriting mappers as well.
+
+ To iterate through an entire hierarchy, use
+ ``mapper.base_mapper.polymorphic_iterator()``.
+
+ """
+ return iter(self.self_and_descendants)
+
+ def primary_mapper(self):
+ """Return the primary mapper corresponding to this mapper's class key
+ (class)."""
+
+ return self.class_manager.mapper
+
+ @property
+ def primary_base_mapper(self):
+ return self.class_manager.mapper.base_mapper
+
+ def _result_has_identity_key(self, result, adapter=None):
+ pk_cols = self.primary_key
+ if adapter:
+ pk_cols = [adapter.columns[c] for c in pk_cols]
+ rk = result.keys()
+        for col in pk_cols:
+            if col not in rk:
+                return False
+        return True
+
+ def identity_key_from_row(self, row, identity_token=None, adapter=None):
+ """Return an identity-map key for use in storing/retrieving an
+ item from the identity map.
+
+ :param row: A :class:`.Row` instance. The columns which are
+ mapped by this :class:`_orm.Mapper` should be locatable in the row,
+ preferably via the :class:`_schema.Column`
+ object directly (as is the case
+ when a :func:`_expression.select` construct is executed), or
+ via string names of the form ``<tablename>_<colname>``.
+
+ """
+ pk_cols = self.primary_key
+ if adapter:
+ pk_cols = [adapter.columns[c] for c in pk_cols]
+
+ return (
+ self._identity_class,
+ tuple(row[column] for column in pk_cols),
+ identity_token,
+ )
+
+ def identity_key_from_primary_key(self, primary_key, identity_token=None):
+ """Return an identity-map key for use in storing/retrieving an
+ item from an identity map.
+
+ :param primary_key: A list of values indicating the identifier.
+
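+        e.g., a minimal sketch, assuming a hypothetical ``User`` class
+        with a single integer primary key column::
+
+            >>> inspect(User).identity_key_from_primary_key((5,))
+            (<class 'User'>, (5,), None)
+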
+ """
+ return self._identity_class, tuple(primary_key), identity_token
+
+ def identity_key_from_instance(self, instance):
+ """Return the identity key for the given instance, based on
+ its primary key attributes.
+
+ If the instance's state is expired, calling this method
+ will result in a database check to see if the object has been deleted.
+ If the row no longer exists,
+ :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
+
+ This value is typically also found on the instance state under the
+        attribute name ``key``.
+
+ """
+ state = attributes.instance_state(instance)
+ return self._identity_key_from_state(state, attributes.PASSIVE_OFF)
+
+ def _identity_key_from_state(
+ self, state, passive=attributes.PASSIVE_RETURN_NO_VALUE
+ ):
+ dict_ = state.dict
+ manager = state.manager
+ return (
+ self._identity_class,
+ tuple(
+ [
+ manager[prop.key].impl.get(state, dict_, passive)
+ for prop in self._identity_key_props
+ ]
+ ),
+ state.identity_token,
+ )
+
+ def primary_key_from_instance(self, instance):
+ """Return the list of primary key values for the given
+ instance.
+
+ If the instance's state is expired, calling this method
+ will result in a database check to see if the object has been deleted.
+ If the row no longer exists,
+ :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
+
+ """
+ state = attributes.instance_state(instance)
+ identity_key = self._identity_key_from_state(
+ state, attributes.PASSIVE_OFF
+ )
+ return identity_key[1]
+
+ @HasMemoized.memoized_attribute
+ def _persistent_sortkey_fn(self):
+ key_fns = [col.type.sort_key_function for col in self.primary_key]
+
+ if set(key_fns).difference([None]):
+
+ def key(state):
+ return tuple(
+ key_fn(val) if key_fn is not None else val
+ for key_fn, val in zip(key_fns, state.key[1])
+ )
+
+ else:
+
+ def key(state):
+ return state.key[1]
+
+ return key
+
+ @HasMemoized.memoized_attribute
+ def _identity_key_props(self):
+ return [self._columntoproperty[col] for col in self.primary_key]
+
+ @HasMemoized.memoized_attribute
+ def _all_pk_cols(self):
+ collection = set()
+ for table in self.tables:
+ collection.update(self._pks_by_table[table])
+ return collection
+
+ @HasMemoized.memoized_attribute
+ def _should_undefer_in_wildcard(self):
+ cols = set(self.primary_key)
+ if self.polymorphic_on is not None:
+ cols.add(self.polymorphic_on)
+ return cols
+
+ @HasMemoized.memoized_attribute
+ def _primary_key_propkeys(self):
+ return {self._columntoproperty[col].key for col in self._all_pk_cols}
+
+ def _get_state_attr_by_column(
+ self, state, dict_, column, passive=attributes.PASSIVE_RETURN_NO_VALUE
+ ):
+ prop = self._columntoproperty[column]
+ return state.manager[prop.key].impl.get(state, dict_, passive=passive)
+
+ def _set_committed_state_attr_by_column(self, state, dict_, column, value):
+ prop = self._columntoproperty[column]
+ state.manager[prop.key].impl.set_committed_value(state, dict_, value)
+
+ def _set_state_attr_by_column(self, state, dict_, column, value):
+ prop = self._columntoproperty[column]
+ state.manager[prop.key].impl.set(state, dict_, value, None)
+
+ def _get_committed_attr_by_column(self, obj, column):
+ state = attributes.instance_state(obj)
+ dict_ = attributes.instance_dict(obj)
+ return self._get_committed_state_attr_by_column(
+ state, dict_, column, passive=attributes.PASSIVE_OFF
+ )
+
+ def _get_committed_state_attr_by_column(
+ self, state, dict_, column, passive=attributes.PASSIVE_RETURN_NO_VALUE
+ ):
+
+ prop = self._columntoproperty[column]
+ return state.manager[prop.key].impl.get_committed_value(
+ state, dict_, passive=passive
+ )
+
+ def _optimized_get_statement(self, state, attribute_names):
+ """assemble a WHERE clause which retrieves a given state by primary
+ key, using a minimized set of tables.
+
+ Applies to a joined-table inheritance mapper where the
+ requested attribute names are only present on joined tables,
+ not the base table. The WHERE clause attempts to include
+ only those tables to minimize joins.
+
+ """
+ props = self._props
+
+ col_attribute_names = set(attribute_names).intersection(
+ state.mapper.column_attrs.keys()
+ )
+ tables = set(
+ chain(
+ *[
+ sql_util.find_tables(c, check_columns=True)
+ for key in col_attribute_names
+ for c in props[key].columns
+ ]
+ )
+ )
+
+ if self.base_mapper.local_table in tables:
+ return None
+
+ def visit_binary(binary):
+ leftcol = binary.left
+ rightcol = binary.right
+ if leftcol is None or rightcol is None:
+ return
+
+ if leftcol.table not in tables:
+ leftval = self._get_committed_state_attr_by_column(
+ state,
+ state.dict,
+ leftcol,
+ passive=attributes.PASSIVE_NO_INITIALIZE,
+ )
+ if leftval in orm_util._none_set:
+ raise _OptGetColumnsNotAvailable()
+ binary.left = sql.bindparam(
+ None, leftval, type_=binary.right.type
+ )
+ elif rightcol.table not in tables:
+ rightval = self._get_committed_state_attr_by_column(
+ state,
+ state.dict,
+ rightcol,
+ passive=attributes.PASSIVE_NO_INITIALIZE,
+ )
+ if rightval in orm_util._none_set:
+ raise _OptGetColumnsNotAvailable()
+ binary.right = sql.bindparam(
+ None, rightval, type_=binary.right.type
+ )
+
+ allconds = []
+
+ start = False
+
+ # as of #7507, from the lowest base table on upwards,
+ # we include all intermediary tables.
+
+ for mapper in reversed(list(self.iterate_to_root())):
+ if mapper.local_table in tables:
+ start = True
+ elif not isinstance(mapper.local_table, expression.TableClause):
+ return None
+ if start and not mapper.single:
+ allconds.append(mapper.inherit_condition)
+ tables.add(mapper.local_table)
+
+ # only the bottom table needs its criteria to be altered to fit
+ # the primary key ident - the rest of the tables upwards to the
+ # descendant-most class should all be present and joined to each
+ # other.
+ try:
+ allconds[0] = visitors.cloned_traverse(
+ allconds[0], {}, {"binary": visit_binary}
+ )
+ except _OptGetColumnsNotAvailable:
+ return None
+
+ cond = sql.and_(*allconds)
+
+ cols = []
+ for key in col_attribute_names:
+ cols.extend(props[key].columns)
+ return (
+ sql.select(*cols)
+ .where(cond)
+ .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
+ )
+
+ def _iterate_to_target_viawpoly(self, mapper):
+ if self.isa(mapper):
+ prev = self
+ for m in self.iterate_to_root():
+ yield m
+
+ if m is not prev and prev not in m._with_polymorphic_mappers:
+ break
+
+ prev = m
+ if m is mapper:
+ break
+
+ def _should_selectin_load(self, enabled_via_opt, polymorphic_from):
+ if not enabled_via_opt:
+ # common case, takes place for all polymorphic loads
+ mapper = polymorphic_from
+ for m in self._iterate_to_target_viawpoly(mapper):
+ if m.polymorphic_load == "selectin":
+ return m
+ else:
+ # uncommon case, selectin load options were used
+ enabled_via_opt = set(enabled_via_opt)
+ enabled_via_opt_mappers = {e.mapper: e for e in enabled_via_opt}
+ for entity in enabled_via_opt.union([polymorphic_from]):
+ mapper = entity.mapper
+ for m in self._iterate_to_target_viawpoly(mapper):
+ if (
+ m.polymorphic_load == "selectin"
+ or m in enabled_via_opt_mappers
+ ):
+ return enabled_via_opt_mappers.get(m, m)
+
+ return None
+
+ @util.preload_module("sqlalchemy.orm.strategy_options")
+ def _subclass_load_via_in(self, entity):
+ """Assemble a that can load the columns local to
+ this subclass as a SELECT with IN.
+
+ """
+ strategy_options = util.preloaded.orm_strategy_options
+
+ assert self.inherits
+
+ if self.polymorphic_on is not None:
+ polymorphic_prop = self._columntoproperty[self.polymorphic_on]
+ keep_props = set([polymorphic_prop] + self._identity_key_props)
+ else:
+ keep_props = set(self._identity_key_props)
+
+ disable_opt = strategy_options.Load(entity)
+ enable_opt = strategy_options.Load(entity)
+
+ for prop in self.attrs:
+ if prop.parent is self or prop in keep_props:
+ # "enable" options, to turn on the properties that we want to
+ # load by default (subject to options from the query)
+ if not isinstance(prop, StrategizedProperty):
+ continue
+
+ enable_opt.set_generic_strategy(
+ # convert string name to an attribute before passing
+ # to loader strategy
+ (getattr(entity.entity_namespace, prop.key),),
+ dict(prop.strategy_key),
+ )
+ else:
+ # "disable" options, to turn off the properties from the
+ # superclass that we *don't* want to load, applied after
+ # the options from the query to override them
+ disable_opt.set_generic_strategy(
+ # convert string name to an attribute before passing
+ # to loader strategy
+ (getattr(entity.entity_namespace, prop.key),),
+ {"do_nothing": True},
+ )
+
+ primary_key = [
+ sql_util._deep_annotate(pk, {"_orm_adapt": True})
+ for pk in self.primary_key
+ ]
+
+ if len(primary_key) > 1:
+ in_expr = sql.tuple_(*primary_key)
+ else:
+ in_expr = primary_key[0]
+
+ if entity.is_aliased_class:
+ assert entity.mapper is self
+
+ q = sql.select(entity).set_label_style(
+ LABEL_STYLE_TABLENAME_PLUS_COL
+ )
+
+ in_expr = entity._adapter.traverse(in_expr)
+ primary_key = [entity._adapter.traverse(k) for k in primary_key]
+ q = q.where(
+ in_expr.in_(sql.bindparam("primary_keys", expanding=True))
+ ).order_by(*primary_key)
+ else:
+
+ q = sql.select(self).set_label_style(
+ LABEL_STYLE_TABLENAME_PLUS_COL
+ )
+ q = q.where(
+ in_expr.in_(sql.bindparam("primary_keys", expanding=True))
+ ).order_by(*primary_key)
+
+ return q, enable_opt, disable_opt
+
+ @HasMemoized.memoized_attribute
+ def _subclass_load_via_in_mapper(self):
+ return self._subclass_load_via_in(self)
+
+ def cascade_iterator(self, type_, state, halt_on=None):
+ r"""Iterate each element and its mapper in an object graph,
+ for all relationships that meet the given cascade rule.
+
+ :param type\_:
+ The name of the cascade rule (i.e. ``"save-update"``, ``"delete"``,
+ etc.).
+
+ .. note:: the ``"all"`` cascade is not accepted here. For a generic
+ object traversal function, see :ref:`faq_walk_objects`.
+
+ :param state:
+          The lead InstanceState.  Child items will be processed per
+          the relationships defined for this object's mapper.
+
+        :return: the method yields 4-tuples of
+          ``(object, mapper, state, dict)`` for each cascaded element.
+
+ .. seealso::
+
+ :ref:`unitofwork_cascades`
+
+ :ref:`faq_walk_objects` - illustrates a generic function to
+ traverse all objects without relying on cascades.
+
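+        e.g., a sketch iterating the "delete" cascade from a hypothetical
+        persistent object ``some_user``::
+
+            from sqlalchemy import inspect
+
+            state = inspect(some_user)
+            for obj, m, st, dct in state.mapper.cascade_iterator(
+                "delete", state
+            ):
+                print(obj)
+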
+ """
+ visited_states = set()
+ prp, mpp = object(), object()
+
+ assert state.mapper.isa(self)
+
+ visitables = deque(
+ [(deque(state.mapper._props.values()), prp, state, state.dict)]
+ )
+
+ while visitables:
+ iterator, item_type, parent_state, parent_dict = visitables[-1]
+ if not iterator:
+ visitables.pop()
+ continue
+
+ if item_type is prp:
+ prop = iterator.popleft()
+ if type_ not in prop.cascade:
+ continue
+ queue = deque(
+ prop.cascade_iterator(
+ type_,
+ parent_state,
+ parent_dict,
+ visited_states,
+ halt_on,
+ )
+ )
+ if queue:
+ visitables.append((queue, mpp, None, None))
+ elif item_type is mpp:
+ (
+ instance,
+ instance_mapper,
+ corresponding_state,
+ corresponding_dict,
+ ) = iterator.popleft()
+ yield (
+ instance,
+ instance_mapper,
+ corresponding_state,
+ corresponding_dict,
+ )
+ visitables.append(
+ (
+ deque(instance_mapper._props.values()),
+ prp,
+ corresponding_state,
+ corresponding_dict,
+ )
+ )
+
+ @HasMemoized.memoized_attribute
+ def _compiled_cache(self):
+ return util.LRUCache(self._compiled_cache_size)
+
+ @HasMemoized.memoized_attribute
+ def _sorted_tables(self):
+ table_to_mapper = {}
+
+ for mapper in self.base_mapper.self_and_descendants:
+ for t in mapper.tables:
+ table_to_mapper.setdefault(t, mapper)
+
+ extra_dependencies = []
+ for table, mapper in table_to_mapper.items():
+ super_ = mapper.inherits
+ if super_:
+ extra_dependencies.extend(
+ [(super_table, table) for super_table in super_.tables]
+ )
+
+ def skip(fk):
+ # attempt to skip dependencies that are not
+ # significant to the inheritance chain
+ # for two tables that are related by inheritance.
+ # while that dependency may be important, it's technically
+ # not what we mean to sort on here.
+ parent = table_to_mapper.get(fk.parent.table)
+ dep = table_to_mapper.get(fk.column.table)
+ if (
+ parent is not None
+ and dep is not None
+ and dep is not parent
+ and dep.inherit_condition is not None
+ ):
+ cols = set(sql_util._find_columns(dep.inherit_condition))
+ if parent.inherit_condition is not None:
+ cols = cols.union(
+ sql_util._find_columns(parent.inherit_condition)
+ )
+ return fk.parent not in cols and fk.column not in cols
+ else:
+ return fk.parent not in cols
+ return False
+
+ sorted_ = sql_util.sort_tables(
+ table_to_mapper,
+ skip_fn=skip,
+ extra_dependencies=extra_dependencies,
+ )
+
+ ret = util.OrderedDict()
+ for t in sorted_:
+ ret[t] = table_to_mapper[t]
+ return ret
+
+ def _memo(self, key, callable_):
+ if key in self._memoized_values:
+ return self._memoized_values[key]
+ else:
+ self._memoized_values[key] = value = callable_()
+ return value
+
+ @util.memoized_property
+ def _table_to_equated(self):
+ """memoized map of tables to collections of columns to be
+ synchronized upwards to the base mapper."""
+
+ result = util.defaultdict(list)
+
+ for table in self._sorted_tables:
+ cols = set(table.c)
+ for m in self.iterate_to_root():
+ if m._inherits_equated_pairs and cols.intersection(
+ util.reduce(
+ set.union,
+ [l.proxy_set for l, r in m._inherits_equated_pairs],
+ )
+ ):
+ result[table].append((m, m._inherits_equated_pairs))
+
+ return result
+
+
+class _OptGetColumnsNotAvailable(Exception):
+ pass
+
+
+def configure_mappers():
+ """Initialize the inter-mapper relationships of all mappers that
+ have been constructed thus far across all :class:`_orm.registry`
+ collections.
+
+ The configure step is used to reconcile and initialize the
+ :func:`_orm.relationship` linkages between mapped classes, as well as to
+ invoke configuration events such as the
+ :meth:`_orm.MapperEvents.before_configured` and
+ :meth:`_orm.MapperEvents.after_configured`, which may be used by ORM
+ extensions or user-defined extension hooks.
+
+ Mapper configuration is normally invoked automatically, the first time
+ mappings from a particular :class:`_orm.registry` are used, as well as
+ whenever mappings are used and additional not-yet-configured mappers have
+    been constructed. The automatic configuration process, however, is local only
+ to the :class:`_orm.registry` involving the target mapper and any related
+ :class:`_orm.registry` objects which it may depend on; this is
+ equivalent to invoking the :meth:`_orm.registry.configure` method
+ on a particular :class:`_orm.registry`.
+
+ By contrast, the :func:`_orm.configure_mappers` function will invoke the
+ configuration process on all :class:`_orm.registry` objects that
+ exist in memory, and may be useful for scenarios where many individual
+ :class:`_orm.registry` objects that are nonetheless interrelated are
+ in use.
+
+ .. versionchanged:: 1.4
+
+ As of SQLAlchemy 1.4.0b2, this function works on a
+ per-:class:`_orm.registry` basis, locating all :class:`_orm.registry`
+ objects present and invoking the :meth:`_orm.registry.configure` method
+ on each. The :meth:`_orm.registry.configure` method may be preferred to
+ limit the configuration of mappers to those local to a particular
+ :class:`_orm.registry` and/or declarative base class.
+
+ Points at which automatic configuration is invoked include when a mapped
+ class is instantiated into an instance, as well as when ORM queries
+ are emitted using :meth:`.Session.query` or :meth:`_orm.Session.execute`
+ with an ORM-enabled statement.
+
+ The mapper configure process, whether invoked by
+ :func:`_orm.configure_mappers` or from :meth:`_orm.registry.configure`,
+ provides several event hooks that can be used to augment the mapper
+ configuration step. These hooks include:
+
+ * :meth:`.MapperEvents.before_configured` - called once before
+ :func:`.configure_mappers` or :meth:`_orm.registry.configure` does any
+ work; this can be used to establish additional options, properties, or
+ related mappings before the operation proceeds.
+
+ * :meth:`.MapperEvents.mapper_configured` - called as each individual
+ :class:`_orm.Mapper` is configured within the process; will include all
+ mapper state except for backrefs set up by other mappers that are still
+ to be configured.
+
+ * :meth:`.MapperEvents.after_configured` - called once after
+ :func:`.configure_mappers` or :meth:`_orm.registry.configure` is
+ complete; at this stage, all :class:`_orm.Mapper` objects that fall
+ within the scope of the configuration operation will be fully configured.
+ Note that the calling application may still have other mappings that
+ haven't been produced yet, such as if they are in modules as yet
+ unimported, and may also have mappings that are still to be configured,
+ if they are in other :class:`_orm.registry` collections not part of the
+ current scope of configuration.
+
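+    e.g., a minimal sketch::
+
+        from sqlalchemy.orm import configure_mappers
+
+        # resolve relationship() linkages across all registries up front,
+        # rather than upon first use of the mappings
+        configure_mappers()
+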
+ """
+
+ _configure_registries(_all_registries(), cascade=True)
+
+
+def _configure_registries(registries, cascade):
+ for reg in registries:
+ if reg._new_mappers:
+ break
+ else:
+ return
+
+ with _CONFIGURE_MUTEX:
+ global _already_compiling
+ if _already_compiling:
+ return
+ _already_compiling = True
+ try:
+
+ # double-check inside mutex
+ for reg in registries:
+ if reg._new_mappers:
+ break
+ else:
+ return
+
+ Mapper.dispatch._for_class(Mapper).before_configured()
+ # initialize properties on all mappers
+ # note that _mapper_registry is unordered, which
+ # may randomly conceal/reveal issues related to
+ # the order of mapper compilation
+
+ _do_configure_registries(registries, cascade)
+ finally:
+ _already_compiling = False
+ Mapper.dispatch._for_class(Mapper).after_configured()
+
+
+@util.preload_module("sqlalchemy.orm.decl_api")
+def _do_configure_registries(registries, cascade):
+
+ registry = util.preloaded.orm_decl_api.registry
+
+ orig = set(registries)
+
+ for reg in registry._recurse_with_dependencies(registries):
+ has_skip = False
+
+ for mapper in reg._mappers_to_configure():
+ run_configure = None
+ for fn in mapper.dispatch.before_mapper_configured:
+ run_configure = fn(mapper, mapper.class_)
+ if run_configure is EXT_SKIP:
+ has_skip = True
+ break
+ if run_configure is EXT_SKIP:
+ continue
+
+ if getattr(mapper, "_configure_failed", False):
+ e = sa_exc.InvalidRequestError(
+ "One or more mappers failed to initialize - "
+ "can't proceed with initialization of other "
+ "mappers. Triggering mapper: '%s'. "
+ "Original exception was: %s"
+ % (mapper, mapper._configure_failed)
+ )
+ e._configure_failed = mapper._configure_failed
+ raise e
+
+ if not mapper.configured:
+ try:
+ mapper._post_configure_properties()
+ mapper._expire_memoizations()
+ mapper.dispatch.mapper_configured(mapper, mapper.class_)
+ except Exception:
+ exc = sys.exc_info()[1]
+ if not hasattr(exc, "_configure_failed"):
+ mapper._configure_failed = exc
+ raise
+ if not has_skip:
+ reg._new_mappers = False
+
+ if not cascade and reg._dependencies.difference(orig):
+ raise sa_exc.InvalidRequestError(
+ "configure was called with cascade=False but "
+ "additional registries remain"
+ )
+
+
+@util.preload_module("sqlalchemy.orm.decl_api")
+def _dispose_registries(registries, cascade):
+
+ registry = util.preloaded.orm_decl_api.registry
+
+ orig = set(registries)
+
+ for reg in registry._recurse_with_dependents(registries):
+ if not cascade and reg._dependents.difference(orig):
+ raise sa_exc.InvalidRequestError(
+ "Registry has dependent registries that are not disposed; "
+ "pass cascade=True to clear these also"
+ )
+
+ while reg._managers:
+ try:
+ manager, _ = reg._managers.popitem()
+ except KeyError:
+ # guard against race between while and popitem
+ pass
+ else:
+ reg._dispose_manager_and_mapper(manager)
+
+ reg._non_primary_mappers.clear()
+ reg._dependents.clear()
+ for dep in reg._dependencies:
+ dep._dependents.discard(reg)
+ reg._dependencies.clear()
+ # this wasn't done in the 1.3 clear_mappers() and in fact it
+ # was a bug, as it could cause configure_mappers() to invoke
+ # the "before_configured" event even though mappers had all been
+ # disposed.
+ reg._new_mappers = False
+
+
+def reconstructor(fn):
+ """Decorate a method as the 'reconstructor' hook.
+
+ Designates a single method as the "reconstructor", an ``__init__``-like
+ method that will be called by the ORM after the instance has been
+ loaded from the database or otherwise reconstituted.
+
+ The reconstructor will be invoked with no arguments. Scalar
+ (non-collection) database-mapped attributes of the instance will
+ be available for use within the function. Eagerly-loaded
+ collections are generally not yet available and will usually only
+ contain the first element. ORM state changes made to objects at
+ this stage will not be recorded for the next flush() operation, so
+ the activity within a reconstructor should be conservative.
+
+ .. seealso::
+
+ :ref:`mapping_constructors`
+
+ :meth:`.InstanceEvents.load`
+
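+    e.g., a minimal sketch, assuming a hypothetical declarative class
+    ``User``::
+
+        from sqlalchemy.orm import reconstructor
+
+        class User(Base):
+            __tablename__ = "user"
+            # ... column definitions ...
+
+            @reconstructor
+            def init_on_load(self):
+                # invoked with no arguments once the instance is loaded;
+                # initialize non-persisted state here
+                self._cache = {}
+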
+ """
+ fn.__sa_reconstructor__ = True
+ return fn
+
+
+def validates(*names, **kw):
+ r"""Decorate a method as a 'validator' for one or more named properties.
+
+ Designates a method as a validator, a method which receives the
+ name of the attribute as well as a value to be assigned, or in the
+ case of a collection, the value to be added to the collection.
+ The function can then raise validation exceptions to halt the
+ process from continuing (where Python's built-in ``ValueError``
+ and ``AssertionError`` exceptions are reasonable choices), or can
+ modify or replace the value before proceeding. The function should
+ otherwise return the given value.
+
+ Note that a validator for a collection **cannot** issue a load of that
+ collection within the validation routine - this usage raises
+ an assertion to avoid recursion overflows. This is a reentrant
+ condition which is not supported.
+
+ :param \*names: list of attribute names to be validated.
+ :param include_removes: if True, "remove" events will be
+ sent as well - the validation function must accept an additional
+ argument "is_remove" which will be a boolean.
+
+ :param include_backrefs: defaults to ``True``; if ``False``, the
+ validation function will not emit if the originator is an attribute
+ event related via a backref. This can be used for bi-directional
+ :func:`.validates` usage where only one validator should emit per
+ attribute operation.
+
+ .. versionadded:: 0.9.0
+
+ .. seealso::
+
+ :ref:`simple_validators` - usage examples for :func:`.validates`
+
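+    e.g., a minimal sketch, assuming a hypothetical declarative class
+    ``User`` with an ``email`` column::
+
+        from sqlalchemy.orm import validates
+
+        class User(Base):
+            __tablename__ = "user"
+            # ... column definitions ...
+
+            @validates("email")
+            def validate_email(self, key, value):
+                # return the value to accept it, or raise to reject it
+                if "@" not in value:
+                    raise ValueError("invalid email address")
+                return value
+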
+ """
+ include_removes = kw.pop("include_removes", False)
+ include_backrefs = kw.pop("include_backrefs", True)
+
+ def wrap(fn):
+ fn.__sa_validators__ = names
+ fn.__sa_validation_opts__ = {
+ "include_removes": include_removes,
+ "include_backrefs": include_backrefs,
+ }
+ return fn
+
+ return wrap
+
+
+def _event_on_load(state, ctx):
+ instrumenting_mapper = state.manager.mapper
+
+ if instrumenting_mapper._reconstructor:
+ instrumenting_mapper._reconstructor(state.obj())
+
+
+def _event_on_init(state, args, kwargs):
+ """Run init_instance hooks.
+
+ This also includes mapper compilation, normally not needed
+ here but helps with some piecemeal configuration
+ scenarios (such as in the ORM tutorial).
+
+ """
+
+ instrumenting_mapper = state.manager.mapper
+ if instrumenting_mapper:
+ instrumenting_mapper._check_configure()
+ if instrumenting_mapper._set_polymorphic_identity:
+ instrumenting_mapper._set_polymorphic_identity(state)
+
+
+class _ColumnMapping(dict):
+ """Error reporting helper for mapper._columntoproperty."""
+
+ __slots__ = ("mapper",)
+
+ def __init__(self, mapper):
+ # TODO: weakref would be a good idea here
+ self.mapper = mapper
+
+ def __missing__(self, column):
+ prop = self.mapper._props.get(column)
+ if prop:
+ raise orm_exc.UnmappedColumnError(
+ "Column '%s.%s' is not available, due to "
+ "conflicting property '%s':%r"
+ % (column.table.name, column.name, column.key, prop)
+ )
+ raise orm_exc.UnmappedColumnError(
+ "No column %s is configured on mapper %s..."
+ % (column, self.mapper)
+ )
diff --git a/lib/sqlalchemy/orm/path_registry.py b/lib/sqlalchemy/orm/path_registry.py
new file mode 100644
index 0000000..331ddd7
--- /dev/null
+++ b/lib/sqlalchemy/orm/path_registry.py
@@ -0,0 +1,519 @@
+# orm/path_registry.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+"""Path tracking utilities, representing mapper graph traversals.
+
+"""
+
+from itertools import chain
+import logging
+
+from . import base as orm_base
+from .. import exc
+from .. import inspection
+from .. import util
+from ..sql import visitors
+from ..sql.traversals import HasCacheKey
+
+log = logging.getLogger(__name__)
+
+
+def _unreduce_path(path):
+ return PathRegistry.deserialize(path)
+
+
+_WILDCARD_TOKEN = "*"
+_DEFAULT_TOKEN = "_sa_default"
+
+
+class PathRegistry(HasCacheKey):
+ """Represent query load paths and registry functions.
+
+ Basically represents structures like:
+
+ (<User mapper>, "orders", <Order mapper>, "items", <Item mapper>)
+
+ These structures are generated by things like
+ query options (joinedload(), subqueryload(), etc.) and are
+ used to compose keys stored in the query._attributes dictionary
+ for various options.
+
+ They are then re-composed at query compile/result row time as
+ the query is formed and as rows are fetched, where they again
+ serve to compose keys to look up options in the context.attributes
+ dictionary, which is copied from query._attributes.
+
+    The path structure has a limited amount of caching, where each
+    "root" ultimately pulls from a fixed registry associated with
+    the first mapper, which also contains elements for each of its
+    property keys.  However, paths longer than two elements, which
+    are the exception rather than the rule, are generated on an
+    as-needed basis.
+
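+    e.g., a sketch composing a two-element path, assuming a hypothetical
+    mapped class ``User`` with an ``orders`` relationship::
+
+        from sqlalchemy import inspect
+
+        mapper = inspect(User)
+        path = PathRegistry.coerce((mapper, mapper.attrs["orders"]))
+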
+ """
+
+ __slots__ = ()
+
+ is_token = False
+ is_root = False
+
+ _cache_key_traversal = [
+ ("path", visitors.ExtendedInternalTraversal.dp_has_cache_key_list)
+ ]
+
+ def __eq__(self, other):
+ try:
+ return other is not None and self.path == other._path_for_compare
+ except AttributeError:
+ util.warn(
+ "Comparison of PathRegistry to %r is not supported"
+ % (type(other))
+ )
+ return False
+
+ def __ne__(self, other):
+ try:
+ return other is None or self.path != other._path_for_compare
+ except AttributeError:
+ util.warn(
+ "Comparison of PathRegistry to %r is not supported"
+ % (type(other))
+ )
+ return True
+
+ @property
+ def _path_for_compare(self):
+ return self.path
+
+ def set(self, attributes, key, value):
+ log.debug("set '%s' on path '%s' to '%s'", key, self, value)
+ attributes[(key, self.natural_path)] = value
+
+ def setdefault(self, attributes, key, value):
+ log.debug("setdefault '%s' on path '%s' to '%s'", key, self, value)
+ attributes.setdefault((key, self.natural_path), value)
+
+ def get(self, attributes, key, value=None):
+ key = (key, self.natural_path)
+ if key in attributes:
+ return attributes[key]
+ else:
+ return value
+
+ def __len__(self):
+ return len(self.path)
+
+ def __hash__(self):
+ return id(self)
+
+ @property
+ def length(self):
+ return len(self.path)
+
+ def pairs(self):
+ path = self.path
+ for i in range(0, len(path), 2):
+ yield path[i], path[i + 1]
+
+ def contains_mapper(self, mapper):
+        for path_mapper in [self.path[i] for i in range(0, len(self.path), 2)]:
+            if path_mapper.is_mapper and path_mapper.isa(mapper):
+                return True
+        return False
+
+ def contains(self, attributes, key):
+ return (key, self.path) in attributes
+
+ def __reduce__(self):
+ return _unreduce_path, (self.serialize(),)
+
+ @classmethod
+ def _serialize_path(cls, path):
+ return list(
+ zip(
+ [
+ m.class_ if (m.is_mapper or m.is_aliased_class) else str(m)
+ for m in [path[i] for i in range(0, len(path), 2)]
+ ],
+ [
+ path[i].key if (path[i].is_property) else str(path[i])
+ for i in range(1, len(path), 2)
+ ]
+ + [None],
+ )
+ )
+
+ @classmethod
+ def _deserialize_path(cls, path):
+ def _deserialize_mapper_token(mcls):
+ return (
+                # note: we likely don't want configure=True here; however,
+                # this is maintained at the moment for backwards compatibility
+ orm_base._inspect_mapped_class(mcls, configure=True)
+ if mcls not in PathToken._intern
+ else PathToken._intern[mcls]
+ )
+
+ def _deserialize_key_token(mcls, key):
+ if key is None:
+ return None
+ elif key in PathToken._intern:
+ return PathToken._intern[key]
+ else:
+ return orm_base._inspect_mapped_class(
+ mcls, configure=True
+ ).attrs[key]
+
+ p = tuple(
+ chain(
+ *[
+ (
+ _deserialize_mapper_token(mcls),
+ _deserialize_key_token(mcls, key),
+ )
+ for mcls, key in path
+ ]
+ )
+ )
+ if p and p[-1] is None:
+ p = p[0:-1]
+ return p
+
+ @classmethod
+ def serialize_context_dict(cls, dict_, tokens):
+ return [
+ ((key, cls._serialize_path(path)), value)
+ for (key, path), value in [
+ (k, v)
+ for k, v in dict_.items()
+ if isinstance(k, tuple) and k[0] in tokens
+ ]
+ ]
+
+ @classmethod
+ def deserialize_context_dict(cls, serialized):
+ return util.OrderedDict(
+ ((key, tuple(cls._deserialize_path(path))), value)
+ for (key, path), value in serialized
+ )
+
+ def serialize(self):
+ path = self.path
+ return self._serialize_path(path)
+
+ @classmethod
+ def deserialize(cls, path):
+ if path is None:
+ return None
+ p = cls._deserialize_path(path)
+ return cls.coerce(p)
+
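+    # Illustrative sketch (not part of the module): round-tripping a
+    # path through serialize() / deserialize(), assuming a hypothetical
+    # mapped class ``User`` with a ``User.addresses`` relationship::
+    #
+    #     from sqlalchemy import inspect
+    #
+    #     path = PathRegistry.coerce(
+    #         (inspect(User), User.addresses.property)
+    #     )
+    #     data = path.serialize()  # [(User, "addresses")]
+    #     assert PathRegistry.deserialize(data) == path
+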
+ @classmethod
+ def per_mapper(cls, mapper):
+ if mapper.is_mapper:
+ return CachingEntityRegistry(cls.root, mapper)
+ else:
+ return SlotsEntityRegistry(cls.root, mapper)
+
+ @classmethod
+ def coerce(cls, raw):
+ return util.reduce(lambda prev, next: prev[next], raw, cls.root)
+
+ def token(self, token):
+ if token.endswith(":" + _WILDCARD_TOKEN):
+ return TokenRegistry(self, token)
+ elif token.endswith(":" + _DEFAULT_TOKEN):
+ return TokenRegistry(self.root, token)
+ else:
+ raise exc.ArgumentError("invalid token: %s" % token)
+
+ def __add__(self, other):
+ return util.reduce(lambda prev, next: prev[next], other.path, self)
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, self.path)
+
+
+class RootRegistry(PathRegistry):
+ """Root registry, defers to mappers so that
+ paths are maintained per-root-mapper.
+
+ """
+
+ inherit_cache = True
+
+ path = natural_path = ()
+ has_entity = False
+ is_aliased_class = False
+ is_root = True
+
+ def __getitem__(self, entity):
+ if entity in PathToken._intern:
+ return PathToken._intern[entity]
+ else:
+ return entity._path_registry
+
+
+PathRegistry.root = RootRegistry()
+
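+# Illustrative sketch, assuming a hypothetical mapped class ``User``:
+# indexing the root registry with a mapper begins a path, and indexing
+# the result with a MapperProperty extends it::
+#
+#     from sqlalchemy import inspect
+#
+#     entity_path = PathRegistry.root[inspect(User)]
+#     prop_path = entity_path[User.addresses.property]
+#     assert prop_path.length == 2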
+
+class PathToken(orm_base.InspectionAttr, HasCacheKey, str):
+ """cacheable string token"""
+
+ _intern = {}
+
+ def _gen_cache_key(self, anon_map, bindparams):
+ return (str(self),)
+
+ @property
+ def _path_for_compare(self):
+ return None
+
+ @classmethod
+ def intern(cls, strvalue):
+ if strvalue in cls._intern:
+ return cls._intern[strvalue]
+ else:
+ cls._intern[strvalue] = result = PathToken(strvalue)
+ return result
+
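+# Illustrative sketch: interned tokens behave as their string value
+# while being shared as single instances::
+#
+#     t1 = PathToken.intern("relationship:*")
+#     t2 = PathToken.intern("relationship:*")
+#     assert t1 is t2
+#     assert t1 == "relationship:*"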
+
+class TokenRegistry(PathRegistry):
+ __slots__ = ("token", "parent", "path", "natural_path")
+
+ inherit_cache = True
+
+ def __init__(self, parent, token):
+ token = PathToken.intern(token)
+
+ self.token = token
+ self.parent = parent
+ self.path = parent.path + (token,)
+ self.natural_path = parent.natural_path + (token,)
+
+ has_entity = False
+
+ is_token = True
+
+ def generate_for_superclasses(self):
+ if not self.parent.is_aliased_class and not self.parent.is_root:
+ for ent in self.parent.mapper.iterate_to_root():
+ yield TokenRegistry(self.parent.parent[ent], self.token)
+ elif (
+ self.parent.is_aliased_class
+ and self.parent.entity._is_with_polymorphic
+ ):
+ yield self
+ for ent in self.parent.entity._with_polymorphic_entities:
+ yield TokenRegistry(self.parent.parent[ent], self.token)
+ else:
+ yield self
+
+ def __getitem__(self, entity):
+ raise NotImplementedError()
+
+
+class PropRegistry(PathRegistry):
+ is_unnatural = False
+ inherit_cache = True
+
+ def __init__(self, parent, prop):
+ # restate this path in terms of the
+ # given MapperProperty's parent.
+ insp = inspection.inspect(parent[-1])
+ natural_parent = parent
+
+ if not insp.is_aliased_class or insp._use_mapper_path:
+ parent = natural_parent = parent.parent[prop.parent]
+ elif (
+ insp.is_aliased_class
+ and insp.with_polymorphic_mappers
+ and prop.parent in insp.with_polymorphic_mappers
+ ):
+ subclass_entity = parent[-1]._entity_for_mapper(prop.parent)
+ parent = parent.parent[subclass_entity]
+
+            # when building a path where with_polymorphic() is in use,
+            # special logic is needed to determine the "natural path" when
+            # subclass entities are used.
+ #
+ # here we are trying to distinguish between a path that starts
+            # on the with_polymorphic entity vs. one that starts on a
+ # normal entity that introduces a with_polymorphic() in the
+ # middle using of_type():
+ #
+ # # as in test_polymorphic_rel->
+ # # test_subqueryload_on_subclass_uses_path_correctly
+ # wp = with_polymorphic(RegularEntity, "*")
+ # sess.query(wp).options(someload(wp.SomeSubEntity.foos))
+ #
+ # vs
+ #
+ # # as in test_relationship->JoinedloadWPolyOfTypeContinued
+ # wp = with_polymorphic(SomeFoo, "*")
+ # sess.query(RegularEntity).options(
+ # someload(RegularEntity.foos.of_type(wp))
+ # .someload(wp.SubFoo.bar)
+ # )
+ #
+            # in the former case, the Query, as it generates a path that we
+            # want to match, will be in terms of the with_polymorphic at the
+ # beginning. in the latter case, Query will generate simple
+ # paths that don't know about this with_polymorphic, so we must
+ # use a separate natural path.
+            #
+ if parent.parent:
+ natural_parent = parent.parent[subclass_entity.mapper]
+ self.is_unnatural = True
+ else:
+ natural_parent = parent
+ elif (
+ natural_parent.parent
+ and insp.is_aliased_class
+ and prop.parent # this should always be the case here
+ is not insp.mapper
+ and insp.mapper.isa(prop.parent)
+ ):
+ natural_parent = parent.parent[prop.parent]
+
+ self.prop = prop
+ self.parent = parent
+ self.path = parent.path + (prop,)
+ self.natural_path = natural_parent.natural_path + (prop,)
+
+ self._wildcard_path_loader_key = (
+ "loader",
+ parent.path + self.prop._wildcard_token,
+ )
+ self._default_path_loader_key = self.prop._default_path_loader_key
+ self._loader_key = ("loader", self.natural_path)
+
+ def __str__(self):
+ return " -> ".join(str(elem) for elem in self.path)
+
+ @util.memoized_property
+ def has_entity(self):
+ return self.prop._links_to_entity
+
+ @util.memoized_property
+ def entity(self):
+ return self.prop.entity
+
+ @property
+ def mapper(self):
+ return self.prop.mapper
+
+ @property
+ def entity_path(self):
+ return self[self.entity]
+
+ def __getitem__(self, entity):
+ if isinstance(entity, (int, slice)):
+ return self.path[entity]
+ else:
+ return SlotsEntityRegistry(self, entity)
+
+
+class AbstractEntityRegistry(PathRegistry):
+ __slots__ = ()
+
+ has_entity = True
+
+ def __init__(self, parent, entity):
+ self.key = entity
+ self.parent = parent
+ self.is_aliased_class = entity.is_aliased_class
+ self.entity = entity
+ self.path = parent.path + (entity,)
+
+ # the "natural path" is the path that we get when Query is traversing
+ # from the lead entities into the various relationships; it corresponds
+ # to the structure of mappers and relationships. when we are given a
+        # path that comes from loader options, as of 1.3 it can have ad-hoc
+ # with_polymorphic() and other AliasedInsp objects inside of it, which
+ # are usually not present in mappings. So here we track both the
+ # "enhanced" path in self.path and the "natural" path that doesn't
+ # include those objects so these two traversals can be matched up.
+
+        # the test here for "(self.is_aliased_class or parent.is_unnatural)"
+        # is to avoid the more expensive conditional logic that follows if we
+        # know we don't have to do it. This conditional could just as well be
+        # "if parent.path:"; it just incurs more function calls.
+ if parent.path and (self.is_aliased_class or parent.is_unnatural):
+ # this is an infrequent code path used only for loader strategies
+ # that also make use of of_type().
+ if entity.mapper.isa(parent.natural_path[-1].entity):
+ self.natural_path = parent.natural_path + (entity.mapper,)
+ else:
+ self.natural_path = parent.natural_path + (
+ parent.natural_path[-1].entity,
+ )
+ # it seems to make sense that since these paths get mixed up
+ # with statements that are cached or not, we should make
+ # sure the natural path is cacheable across different occurrences
+ # of equivalent AliasedClass objects. however, so far this
+ # does not seem to be needed for whatever reason.
+ # elif not parent.path and self.is_aliased_class:
+ # self.natural_path = (self.entity._generate_cache_key()[0], )
+ else:
+ # self.natural_path = parent.natural_path + (entity, )
+ self.natural_path = self.path
+
+ @property
+ def entity_path(self):
+ return self
+
+ @property
+ def mapper(self):
+ return inspection.inspect(self.entity).mapper
+
+ def __bool__(self):
+ return True
+
+ __nonzero__ = __bool__
+
+ def __getitem__(self, entity):
+ if isinstance(entity, (int, slice)):
+ return self.path[entity]
+ elif entity in PathToken._intern:
+ return TokenRegistry(self, PathToken._intern[entity])
+ else:
+ return PropRegistry(self, entity)
+
+
+class SlotsEntityRegistry(AbstractEntityRegistry):
+    # for aliased classes, return a lightweight version that creates
+    # no reference cycles
+ inherit_cache = True
+
+ __slots__ = (
+ "key",
+ "parent",
+ "is_aliased_class",
+ "entity",
+ "path",
+ "natural_path",
+ )
+
+
+class CachingEntityRegistry(AbstractEntityRegistry, dict):
+    # for long-lived mappers, return a dict-based caching
+    # version that creates reference cycles
+
+ inherit_cache = True
+
+ def __getitem__(self, entity):
+ if isinstance(entity, (int, slice)):
+ return self.path[entity]
+ else:
+ return dict.__getitem__(self, entity)
+
+ def __missing__(self, key):
+ self[key] = item = PropRegistry(self, key)
+
+ return item
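+
+
+# Illustrative sketch, assuming a hypothetical mapped class ``User``:
+# the dict-based registry memoizes PropRegistry objects via
+# __missing__(), so repeated lookups return the same instance, at the
+# cost of reference cycles::
+#
+#     from sqlalchemy import inspect
+#
+#     reg = PathRegistry.per_mapper(inspect(User))
+#     assert reg[User.addresses.property] is reg[User.addresses.property]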
diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py
new file mode 100644
index 0000000..a17b24a
--- /dev/null
+++ b/lib/sqlalchemy/orm/persistence.py
@@ -0,0 +1,2517 @@
+# orm/persistence.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""private module containing functions used to emit INSERT, UPDATE
+and DELETE statements on behalf of a :class:`_orm.Mapper` and its descending
+mappers.
+
+The functions here are called only by the unit of work functions
+in unitofwork.py.
+
+"""
+
+from itertools import chain
+from itertools import groupby
+import operator
+
+from . import attributes
+from . import evaluator
+from . import exc as orm_exc
+from . import loading
+from . import sync
+from .base import NO_VALUE
+from .base import state_str
+from .. import exc as sa_exc
+from .. import future
+from .. import sql
+from .. import util
+from ..engine import result as _result
+from ..sql import coercions
+from ..sql import expression
+from ..sql import operators
+from ..sql import roles
+from ..sql import select
+from ..sql import sqltypes
+from ..sql.base import _entity_namespace_key
+from ..sql.base import CompileState
+from ..sql.base import Options
+from ..sql.dml import DeleteDMLState
+from ..sql.dml import InsertDMLState
+from ..sql.dml import UpdateDMLState
+from ..sql.elements import BooleanClauseList
+from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
+
+
+def _bulk_insert(
+ mapper,
+ mappings,
+ session_transaction,
+ isstates,
+ return_defaults,
+ render_nulls,
+):
+ base_mapper = mapper.base_mapper
+
+ if session_transaction.session.connection_callable:
+ raise NotImplementedError(
+ "connection_callable / per-instance sharding "
+ "not supported in bulk_insert()"
+ )
+
+ if isstates:
+ if return_defaults:
+ states = [(state, state.dict) for state in mappings]
+ mappings = [dict_ for (state, dict_) in states]
+ else:
+ mappings = [state.dict for state in mappings]
+ else:
+ mappings = list(mappings)
+
+ connection = session_transaction.connection(base_mapper)
+ for table, super_mapper in base_mapper._sorted_tables.items():
+ if not mapper.isa(super_mapper):
+ continue
+
+ records = (
+ (
+ None,
+ state_dict,
+ params,
+ mapper,
+ connection,
+ value_params,
+ has_all_pks,
+ has_all_defaults,
+ )
+ for (
+ state,
+ state_dict,
+ params,
+ mp,
+ conn,
+ value_params,
+ has_all_pks,
+ has_all_defaults,
+ ) in _collect_insert_commands(
+ table,
+ ((None, mapping, mapper, connection) for mapping in mappings),
+ bulk=True,
+ return_defaults=return_defaults,
+ render_nulls=render_nulls,
+ )
+ )
+ _emit_insert_statements(
+ base_mapper,
+ None,
+ super_mapper,
+ table,
+ records,
+ bookkeeping=return_defaults,
+ )
+
+ if return_defaults and isstates:
+ identity_cls = mapper._identity_class
+ identity_props = [p.key for p in mapper._identity_key_props]
+ for state, dict_ in states:
+ state.key = (
+ identity_cls,
+ tuple([dict_[key] for key in identity_props]),
+ )
+
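+# _bulk_insert() is normally reached via the Session bulk APIs; a
+# minimal sketch, assuming a hypothetical mapped class ``User``::
+#
+#     session.bulk_insert_mappings(
+#         User,
+#         [{"name": "u1"}, {"name": "u2"}],
+#         return_defaults=True,  # round-trips newly generated PKs
+#     )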
+
+def _bulk_update(
+ mapper, mappings, session_transaction, isstates, update_changed_only
+):
+ base_mapper = mapper.base_mapper
+
+ search_keys = mapper._primary_key_propkeys
+ if mapper._version_id_prop:
+ search_keys = {mapper._version_id_prop.key}.union(search_keys)
+
+ def _changed_dict(mapper, state):
+ return dict(
+ (k, v)
+ for k, v in state.dict.items()
+ if k in state.committed_state or k in search_keys
+ )
+
+ if isstates:
+ if update_changed_only:
+ mappings = [_changed_dict(mapper, state) for state in mappings]
+ else:
+ mappings = [state.dict for state in mappings]
+ else:
+ mappings = list(mappings)
+
+ if session_transaction.session.connection_callable:
+ raise NotImplementedError(
+ "connection_callable / per-instance sharding "
+ "not supported in bulk_update()"
+ )
+
+ connection = session_transaction.connection(base_mapper)
+
+ for table, super_mapper in base_mapper._sorted_tables.items():
+ if not mapper.isa(super_mapper):
+ continue
+
+ records = _collect_update_commands(
+ None,
+ table,
+ (
+ (
+ None,
+ mapping,
+ mapper,
+ connection,
+ (
+ mapping[mapper._version_id_prop.key]
+ if mapper._version_id_prop
+ else None
+ ),
+ )
+ for mapping in mappings
+ ),
+ bulk=True,
+ )
+
+ _emit_update_statements(
+ base_mapper,
+ None,
+ super_mapper,
+ table,
+ records,
+ bookkeeping=False,
+ )
+
+
+def save_obj(base_mapper, states, uowtransaction, single=False):
+ """Issue ``INSERT`` and/or ``UPDATE`` statements for a list
+ of objects.
+
+ This is called within the context of a UOWTransaction during a
+ flush operation, given a list of states to be flushed. The
+ base mapper in an inheritance hierarchy handles the inserts/
+ updates for all descendant mappers.
+
+ """
+
+    # if batch=False, call save_obj separately for each object
+ if not single and not base_mapper.batch:
+ for state in _sort_states(base_mapper, states):
+ save_obj(base_mapper, [state], uowtransaction, single=True)
+ return
+
+ states_to_update = []
+ states_to_insert = []
+
+ for (
+ state,
+ dict_,
+ mapper,
+ connection,
+ has_identity,
+ row_switch,
+ update_version_id,
+ ) in _organize_states_for_save(base_mapper, states, uowtransaction):
+ if has_identity or row_switch:
+ states_to_update.append(
+ (state, dict_, mapper, connection, update_version_id)
+ )
+ else:
+ states_to_insert.append((state, dict_, mapper, connection))
+
+ for table, mapper in base_mapper._sorted_tables.items():
+ if table not in mapper._pks_by_table:
+ continue
+ insert = _collect_insert_commands(table, states_to_insert)
+
+ update = _collect_update_commands(
+ uowtransaction, table, states_to_update
+ )
+
+ _emit_update_statements(
+ base_mapper,
+ uowtransaction,
+ mapper,
+ table,
+ update,
+ )
+
+ _emit_insert_statements(
+ base_mapper,
+ uowtransaction,
+ mapper,
+ table,
+ insert,
+ )
+
+ _finalize_insert_update_commands(
+ base_mapper,
+ uowtransaction,
+ chain(
+ (
+ (state, state_dict, mapper, connection, False)
+ for (state, state_dict, mapper, connection) in states_to_insert
+ ),
+ (
+ (state, state_dict, mapper, connection, True)
+ for (
+ state,
+ state_dict,
+ mapper,
+ connection,
+ update_version_id,
+ ) in states_to_update
+ ),
+ ),
+ )
+
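+# Illustrative sketch of the per-table ordering above, assuming a
+# hypothetical joined-table inheritance mapping::
+#
+#     class Employee(Base):
+#         __tablename__ = "employee"
+#         id = Column(Integer, primary_key=True)
+#
+#     class Manager(Employee):
+#         __tablename__ = "manager"
+#         id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
+#
+# flushing a new Manager INSERTs into "employee" first, then "manager",
+# with the newly generated primary key carried between the two tables.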
+
+def post_update(base_mapper, states, uowtransaction, post_update_cols):
+ """Issue UPDATE statements on behalf of a relationship() which
+ specifies post_update.
+
+ """
+
+ states_to_update = list(
+ _organize_states_for_post_update(base_mapper, states, uowtransaction)
+ )
+
+ for table, mapper in base_mapper._sorted_tables.items():
+ if table not in mapper._pks_by_table:
+ continue
+
+ update = (
+ (
+ state,
+ state_dict,
+ sub_mapper,
+ connection,
+ mapper._get_committed_state_attr_by_column(
+ state, state_dict, mapper.version_id_col
+ )
+ if mapper.version_id_col is not None
+ else None,
+ )
+ for state, state_dict, sub_mapper, connection in states_to_update
+ if table in sub_mapper._pks_by_table
+ )
+
+ update = _collect_post_update_commands(
+ base_mapper, uowtransaction, table, update, post_update_cols
+ )
+
+ _emit_post_update_statements(
+ base_mapper,
+ uowtransaction,
+ mapper,
+ table,
+ update,
+ )
+
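+# post_update is enabled on relationship() when two rows depend on each
+# other in a cycle; a minimal sketch with hypothetical classes::
+#
+#     class Parent(Base):
+#         __tablename__ = "parent"
+#         id = Column(Integer, primary_key=True)
+#         favorite_child_id = Column(Integer, ForeignKey("child.id"))
+#         favorite_child = relationship(
+#             "Child", foreign_keys=favorite_child_id, post_update=True
+#         )
+#
+#     class Child(Base):
+#         __tablename__ = "child"
+#         id = Column(Integer, primary_key=True)
+#         parent_id = Column(Integer, ForeignKey("parent.id"))
+#
+# the flush INSERTs both rows with favorite_child_id unset, then emits
+# the UPDATE handled here to populate it afterwards.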
+
+def delete_obj(base_mapper, states, uowtransaction):
+ """Issue ``DELETE`` statements for a list of objects.
+
+ This is called within the context of a UOWTransaction during a
+ flush operation.
+
+ """
+
+ states_to_delete = list(
+ _organize_states_for_delete(base_mapper, states, uowtransaction)
+ )
+
+ table_to_mapper = base_mapper._sorted_tables
+
+ for table in reversed(list(table_to_mapper.keys())):
+ mapper = table_to_mapper[table]
+ if table not in mapper._pks_by_table:
+ continue
+ elif mapper.inherits and mapper.passive_deletes:
+ continue
+
+ delete = _collect_delete_commands(
+ base_mapper, uowtransaction, table, states_to_delete
+ )
+
+ _emit_delete_statements(
+ base_mapper,
+ uowtransaction,
+ mapper,
+ table,
+ delete,
+ )
+
+ for (
+ state,
+ state_dict,
+ mapper,
+ connection,
+ update_version_id,
+ ) in states_to_delete:
+ mapper.dispatch.after_delete(mapper, connection, state)
+
+
+def _organize_states_for_save(base_mapper, states, uowtransaction):
+ """Make an initial pass across a set of states for INSERT or
+ UPDATE.
+
+    This includes splitting the states into distinct INSERT and
+    UPDATE lists, calling before_insert/before_update, and obtaining
+    key information for each state: its dictionary, mapper, the
+    connection to use for execution, and the identity flag.
+
+ """
+
+ for state, dict_, mapper, connection in _connections_for_states(
+ base_mapper, uowtransaction, states
+ ):
+
+ has_identity = bool(state.key)
+
+ instance_key = state.key or mapper._identity_key_from_state(state)
+
+ row_switch = update_version_id = None
+
+ # call before_XXX extensions
+ if not has_identity:
+ mapper.dispatch.before_insert(mapper, connection, state)
+ else:
+ mapper.dispatch.before_update(mapper, connection, state)
+
+ if mapper._validate_polymorphic_identity:
+ mapper._validate_polymorphic_identity(mapper, state, dict_)
+
+ # detect if we have a "pending" instance (i.e. has
+ # no instance_key attached to it), and another instance
+ # with the same identity key already exists as persistent.
+ # convert to an UPDATE if so.
+ if (
+ not has_identity
+ and instance_key in uowtransaction.session.identity_map
+ ):
+ instance = uowtransaction.session.identity_map[instance_key]
+ existing = attributes.instance_state(instance)
+
+ if not uowtransaction.was_already_deleted(existing):
+ if not uowtransaction.is_deleted(existing):
+ util.warn(
+ "New instance %s with identity key %s conflicts "
+ "with persistent instance %s"
+ % (state_str(state), instance_key, state_str(existing))
+ )
+ else:
+ base_mapper._log_debug(
+ "detected row switch for identity %s. "
+ "will update %s, remove %s from "
+ "transaction",
+ instance_key,
+ state_str(state),
+ state_str(existing),
+ )
+
+ # remove the "delete" flag from the existing element
+ uowtransaction.remove_state_actions(existing)
+ row_switch = existing
+
+ if (has_identity or row_switch) and mapper.version_id_col is not None:
+ update_version_id = mapper._get_committed_state_attr_by_column(
+ row_switch if row_switch else state,
+ row_switch.dict if row_switch else dict_,
+ mapper.version_id_col,
+ )
+
+ yield (
+ state,
+ dict_,
+ mapper,
+ connection,
+ has_identity,
+ row_switch,
+ update_version_id,
+ )
+
+
+def _organize_states_for_post_update(base_mapper, states, uowtransaction):
+ """Make an initial pass across a set of states for UPDATE
+ corresponding to post_update.
+
+    This includes obtaining key information for each state,
+    including its dictionary, mapper, and the connection to use
+    for execution.
+
+ """
+ return _connections_for_states(base_mapper, uowtransaction, states)
+
+
+def _organize_states_for_delete(base_mapper, states, uowtransaction):
+ """Make an initial pass across a set of states for DELETE.
+
+    This includes calling the before_delete event and obtaining
+    key information for each state, including its dictionary,
+    mapper, and the connection to use for execution.
+
+ """
+ for state, dict_, mapper, connection in _connections_for_states(
+ base_mapper, uowtransaction, states
+ ):
+
+ mapper.dispatch.before_delete(mapper, connection, state)
+
+ if mapper.version_id_col is not None:
+ update_version_id = mapper._get_committed_state_attr_by_column(
+ state, dict_, mapper.version_id_col
+ )
+ else:
+ update_version_id = None
+
+ yield (state, dict_, mapper, connection, update_version_id)
+
+
+def _collect_insert_commands(
+ table,
+ states_to_insert,
+ bulk=False,
+ return_defaults=False,
+ render_nulls=False,
+):
+ """Identify sets of values to use in INSERT statements for a
+ list of states.
+
+ """
+ for state, state_dict, mapper, connection in states_to_insert:
+ if table not in mapper._pks_by_table:
+ continue
+
+ params = {}
+ value_params = {}
+
+ propkey_to_col = mapper._propkey_to_col[table]
+
+ eval_none = mapper._insert_cols_evaluating_none[table]
+
+ for propkey in set(propkey_to_col).intersection(state_dict):
+ value = state_dict[propkey]
+ col = propkey_to_col[propkey]
+ if value is None and col not in eval_none and not render_nulls:
+ continue
+ elif not bulk and (
+ hasattr(value, "__clause_element__")
+ or isinstance(value, sql.ClauseElement)
+ ):
+ value_params[col] = (
+ value.__clause_element__()
+ if hasattr(value, "__clause_element__")
+ else value
+ )
+ else:
+ params[col.key] = value
+
+ if not bulk:
+ # for all the columns that have no default and we don't have
+ # a value and where "None" is not a special value, add
+ # explicit None to the INSERT. This is a legacy behavior
+ # which might be worth removing, as it should not be necessary
+ # and also produces confusion, given that "missing" and None
+ # now have distinct meanings
+ for colkey in (
+ mapper._insert_cols_as_none[table]
+ .difference(params)
+ .difference([c.key for c in value_params])
+ ):
+ params[colkey] = None
+
+ if not bulk or return_defaults:
+ # params are in terms of Column key objects, so
+ # compare to pk_keys_by_table
+ has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
+
+ if mapper.base_mapper.eager_defaults:
+ has_all_defaults = mapper._server_default_cols[table].issubset(
+ params
+ )
+ else:
+ has_all_defaults = True
+ else:
+ has_all_defaults = has_all_pks = True
+
+ if (
+ mapper.version_id_generator is not False
+ and mapper.version_id_col is not None
+ and mapper.version_id_col in mapper._cols_by_table[table]
+ ):
+ params[mapper.version_id_col.key] = mapper.version_id_generator(
+ None
+ )
+
+ yield (
+ state,
+ state_dict,
+ params,
+ mapper,
+ connection,
+ value_params,
+ has_all_pks,
+ has_all_defaults,
+ )
+
+
+def _collect_update_commands(
+ uowtransaction, table, states_to_update, bulk=False
+):
+ """Identify sets of values to use in UPDATE statements for a
+ list of states.
+
+ This function works intricately with the history system
+ to determine exactly what values should be updated
+ as well as how the row should be matched within an UPDATE
+    statement. This includes some tricky scenarios where the
+    primary key of an object might have been changed.
+
+ """
+
+ for (
+ state,
+ state_dict,
+ mapper,
+ connection,
+ update_version_id,
+ ) in states_to_update:
+
+ if table not in mapper._pks_by_table:
+ continue
+
+ pks = mapper._pks_by_table[table]
+
+ value_params = {}
+
+ propkey_to_col = mapper._propkey_to_col[table]
+
+ if bulk:
+ # keys here are mapped attribute keys, so
+ # look at mapper attribute keys for pk
+ params = dict(
+ (propkey_to_col[propkey].key, state_dict[propkey])
+ for propkey in set(propkey_to_col)
+ .intersection(state_dict)
+ .difference(mapper._pk_attr_keys_by_table[table])
+ )
+ has_all_defaults = True
+ else:
+ params = {}
+ for propkey in set(propkey_to_col).intersection(
+ state.committed_state
+ ):
+ value = state_dict[propkey]
+ col = propkey_to_col[propkey]
+
+ if hasattr(value, "__clause_element__") or isinstance(
+ value, sql.ClauseElement
+ ):
+ value_params[col] = (
+ value.__clause_element__()
+ if hasattr(value, "__clause_element__")
+ else value
+ )
+ # guard against values that generate non-__nonzero__
+ # objects for __eq__()
+ elif (
+ state.manager[propkey].impl.is_equal(
+ value, state.committed_state[propkey]
+ )
+ is not True
+ ):
+ params[col.key] = value
+
+ if mapper.base_mapper.eager_defaults:
+ has_all_defaults = (
+ mapper._server_onupdate_default_cols[table]
+ ).issubset(params)
+ else:
+ has_all_defaults = True
+
+ if (
+ update_version_id is not None
+ and mapper.version_id_col in mapper._cols_by_table[table]
+ ):
+
+ if not bulk and not (params or value_params):
+ # HACK: check for history in other tables, in case the
+ # history is only in a different table than the one
+ # where the version_id_col is. This logic was lost
+ # from 0.9 -> 1.0.0 and restored in 1.0.6.
+ for prop in mapper._columntoproperty.values():
+ history = state.manager[prop.key].impl.get_history(
+ state, state_dict, attributes.PASSIVE_NO_INITIALIZE
+ )
+ if history.added:
+ break
+ else:
+ # no net change, break
+ continue
+
+ col = mapper.version_id_col
+ no_params = not params and not value_params
+ params[col._label] = update_version_id
+
+ if (
+ bulk or col.key not in params
+ ) and mapper.version_id_generator is not False:
+ val = mapper.version_id_generator(update_version_id)
+ params[col.key] = val
+ elif mapper.version_id_generator is False and no_params:
+ # no version id generator, no values set on the table,
+ # and version id wasn't manually incremented.
+ # set version id to itself so we get an UPDATE
+ # statement
+ params[col.key] = update_version_id
+
+ elif not (params or value_params):
+ continue
+
+ has_all_pks = True
+ expect_pk_cascaded = False
+ if bulk:
+ # keys here are mapped attribute keys, so
+ # look at mapper attribute keys for pk
+ pk_params = dict(
+ (propkey_to_col[propkey]._label, state_dict.get(propkey))
+ for propkey in set(propkey_to_col).intersection(
+ mapper._pk_attr_keys_by_table[table]
+ )
+ )
+ else:
+ pk_params = {}
+ for col in pks:
+ propkey = mapper._columntoproperty[col].key
+
+ history = state.manager[propkey].impl.get_history(
+ state, state_dict, attributes.PASSIVE_OFF
+ )
+
+ if history.added:
+ if (
+ not history.deleted
+ or ("pk_cascaded", state, col)
+ in uowtransaction.attributes
+ ):
+ expect_pk_cascaded = True
+ pk_params[col._label] = history.added[0]
+ params.pop(col.key, None)
+ else:
+ # else, use the old value to locate the row
+ pk_params[col._label] = history.deleted[0]
+ if col in value_params:
+ has_all_pks = False
+ else:
+ pk_params[col._label] = history.unchanged[0]
+ if pk_params[col._label] is None:
+ raise orm_exc.FlushError(
+ "Can't update table %s using NULL for primary "
+ "key value on column %s" % (table, col)
+ )
+
+ if params or value_params:
+ params.update(pk_params)
+ yield (
+ state,
+ state_dict,
+ params,
+ mapper,
+ connection,
+ value_params,
+ has_all_defaults,
+ has_all_pks,
+ )
+ elif expect_pk_cascaded:
+ # no UPDATE occurs on this table, but we expect that CASCADE rules
+ # have changed the primary key of the row; propagate this event to
+            # other columns that expect to have been modified. this normally
+            # occurs after the UPDATE is emitted; however, we invoke it here
+            # explicitly since no UPDATE is being emitted.
+ for m, equated_pairs in mapper._table_to_equated[table]:
+ sync.populate(
+ state,
+ m,
+ state,
+ m,
+ equated_pairs,
+ uowtransaction,
+ mapper.passive_updates,
+ )
+
+
+def _collect_post_update_commands(
+ base_mapper, uowtransaction, table, states_to_update, post_update_cols
+):
+ """Identify sets of values to use in UPDATE statements for a
+ list of states within a post_update operation.
+
+ """
+
+ for (
+ state,
+ state_dict,
+ mapper,
+ connection,
+ update_version_id,
+ ) in states_to_update:
+
+ # assert table in mapper._pks_by_table
+
+ pks = mapper._pks_by_table[table]
+ params = {}
+ hasdata = False
+
+ for col in mapper._cols_by_table[table]:
+ if col in pks:
+ params[col._label] = mapper._get_state_attr_by_column(
+ state, state_dict, col, passive=attributes.PASSIVE_OFF
+ )
+
+ elif col in post_update_cols or col.onupdate is not None:
+ prop = mapper._columntoproperty[col]
+ history = state.manager[prop.key].impl.get_history(
+ state, state_dict, attributes.PASSIVE_NO_INITIALIZE
+ )
+ if history.added:
+ value = history.added[0]
+ params[col.key] = value
+ hasdata = True
+ if hasdata:
+ if (
+ update_version_id is not None
+ and mapper.version_id_col in mapper._cols_by_table[table]
+ ):
+
+ col = mapper.version_id_col
+ params[col._label] = update_version_id
+
+ if (
+ bool(state.key)
+ and col.key not in params
+ and mapper.version_id_generator is not False
+ ):
+ val = mapper.version_id_generator(update_version_id)
+ params[col.key] = val
+ yield state, state_dict, mapper, connection, params
+
+
+def _collect_delete_commands(
+ base_mapper, uowtransaction, table, states_to_delete
+):
+ """Identify values to use in DELETE statements for a list of
+ states to be deleted."""
+
+ for (
+ state,
+ state_dict,
+ mapper,
+ connection,
+ update_version_id,
+ ) in states_to_delete:
+
+ if table not in mapper._pks_by_table:
+ continue
+
+ params = {}
+ for col in mapper._pks_by_table[table]:
+ params[
+ col.key
+ ] = value = mapper._get_committed_state_attr_by_column(
+ state, state_dict, col
+ )
+ if value is None:
+ raise orm_exc.FlushError(
+ "Can't delete from table %s "
+ "using NULL for primary "
+ "key value on column %s" % (table, col)
+ )
+
+ if (
+ update_version_id is not None
+ and mapper.version_id_col in mapper._cols_by_table[table]
+ ):
+ params[mapper.version_id_col.key] = update_version_id
+ yield params, connection
+
+
+def _emit_update_statements(
+ base_mapper,
+ uowtransaction,
+ mapper,
+ table,
+ update,
+ bookkeeping=True,
+):
+ """Emit UPDATE statements corresponding to value lists collected
+ by _collect_update_commands()."""
+
+ needs_version_id = (
+ mapper.version_id_col is not None
+ and mapper.version_id_col in mapper._cols_by_table[table]
+ )
+
+ execution_options = {"compiled_cache": base_mapper._compiled_cache}
+
+ def update_stmt():
+ clauses = BooleanClauseList._construct_raw(operators.and_)
+
+ for col in mapper._pks_by_table[table]:
+ clauses.clauses.append(
+ col == sql.bindparam(col._label, type_=col.type)
+ )
+
+ if needs_version_id:
+ clauses.clauses.append(
+ mapper.version_id_col
+ == sql.bindparam(
+ mapper.version_id_col._label,
+ type_=mapper.version_id_col.type,
+ )
+ )
+
+ stmt = table.update().where(clauses)
+ return stmt
+
+ cached_stmt = base_mapper._memo(("update", table), update_stmt)
+
+ for (
+ (connection, paramkeys, hasvalue, has_all_defaults, has_all_pks),
+ records,
+ ) in groupby(
+ update,
+ lambda rec: (
+ rec[4], # connection
+ set(rec[2]), # set of parameter keys
+ bool(rec[5]), # whether or not we have "value" parameters
+ rec[6], # has_all_defaults
+            rec[7],  # has_all_pks
+ ),
+ ):
+ rows = 0
+ records = list(records)
+
+ statement = cached_stmt
+ return_defaults = False
+
+ if not has_all_pks:
+ statement = statement.return_defaults()
+ return_defaults = True
+ elif (
+ bookkeeping
+ and not has_all_defaults
+ and mapper.base_mapper.eager_defaults
+ ):
+ statement = statement.return_defaults()
+ return_defaults = True
+ elif mapper.version_id_col is not None:
+ statement = statement.return_defaults(mapper.version_id_col)
+ return_defaults = True
+
+ assert_singlerow = (
+ connection.dialect.supports_sane_rowcount
+ if not return_defaults
+ else connection.dialect.supports_sane_rowcount_returning
+ )
+
+ assert_multirow = (
+ assert_singlerow
+ and connection.dialect.supports_sane_multi_rowcount
+ )
+ allow_multirow = has_all_defaults and not needs_version_id
+
+ if hasvalue:
+ for (
+ state,
+ state_dict,
+ params,
+ mapper,
+ connection,
+ value_params,
+ has_all_defaults,
+ has_all_pks,
+ ) in records:
+ c = connection._execute_20(
+ statement.values(value_params),
+ params,
+ execution_options=execution_options,
+ )
+ if bookkeeping:
+ _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ value_params,
+ True,
+ c.returned_defaults,
+ )
+ rows += c.rowcount
+ check_rowcount = assert_singlerow
+ else:
+ if not allow_multirow:
+ check_rowcount = assert_singlerow
+ for (
+ state,
+ state_dict,
+ params,
+ mapper,
+ connection,
+ value_params,
+ has_all_defaults,
+ has_all_pks,
+ ) in records:
+ c = connection._execute_20(
+ statement, params, execution_options=execution_options
+ )
+
+ # TODO: why with bookkeeping=False?
+ if bookkeeping:
+ _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ value_params,
+ True,
+ c.returned_defaults,
+ )
+ rows += c.rowcount
+ else:
+ multiparams = [rec[2] for rec in records]
+
+ check_rowcount = assert_multirow or (
+ assert_singlerow and len(multiparams) == 1
+ )
+
+ c = connection._execute_20(
+ statement, multiparams, execution_options=execution_options
+ )
+
+ rows += c.rowcount
+
+ for (
+ state,
+ state_dict,
+ params,
+ mapper,
+ connection,
+ value_params,
+ has_all_defaults,
+ has_all_pks,
+ ) in records:
+ if bookkeeping:
+ _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ value_params,
+ True,
+ c.returned_defaults
+ if not c.context.executemany
+ else None,
+ )
+
+ if check_rowcount:
+ if rows != len(records):
+ raise orm_exc.StaleDataError(
+ "UPDATE statement on table '%s' expected to "
+ "update %d row(s); %d were matched."
+ % (table.description, len(records), rows)
+ )
+
+ elif needs_version_id:
+ util.warn(
+ "Dialect %s does not support updated rowcount "
+ "- versioning cannot be verified."
+ % c.dialect.dialect_description
+ )
+
+
+def _emit_insert_statements(
+ base_mapper,
+ uowtransaction,
+ mapper,
+ table,
+ insert,
+ bookkeeping=True,
+):
+ """Emit INSERT statements corresponding to value lists collected
+ by _collect_insert_commands()."""
+
+ cached_stmt = base_mapper._memo(("insert", table), table.insert)
+
+ execution_options = {"compiled_cache": base_mapper._compiled_cache}
+
+ for (
+ (connection, pkeys, hasvalue, has_all_pks, has_all_defaults),
+ records,
+ ) in groupby(
+ insert,
+ lambda rec: (
+ rec[4], # connection
+ set(rec[2]), # parameter keys
+ bool(rec[5]), # whether we have "value" parameters
+            rec[6],  # has_all_pks
+            rec[7],  # has_all_defaults
+ ),
+ ):
+
+ statement = cached_stmt
+
+ if (
+ not bookkeeping
+ or (
+ has_all_defaults
+ or not base_mapper.eager_defaults
+ or not connection.dialect.implicit_returning
+ )
+ and has_all_pks
+ and not hasvalue
+ ):
+ # the "we don't need newly generated values back" section.
+ # here we have all the PKs, all the defaults or we don't want
+ # to fetch them, or the dialect doesn't support RETURNING at all
+ # so we have to post-fetch / use lastrowid anyway.
+ records = list(records)
+ multiparams = [rec[2] for rec in records]
+
+ c = connection._execute_20(
+ statement, multiparams, execution_options=execution_options
+ )
+
+ if bookkeeping:
+ for (
+ (
+ state,
+ state_dict,
+ params,
+ mapper_rec,
+ conn,
+ value_params,
+ has_all_pks,
+ has_all_defaults,
+ ),
+ last_inserted_params,
+ ) in zip(records, c.context.compiled_parameters):
+ if state:
+ _postfetch(
+ mapper_rec,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ last_inserted_params,
+ value_params,
+ False,
+ c.returned_defaults
+ if not c.context.executemany
+ else None,
+ )
+ else:
+ _postfetch_bulk_save(mapper_rec, state_dict, table)
+
+ else:
+ # here, we need defaults and/or pk values back.
+
+ records = list(records)
+ if (
+ not hasvalue
+ and connection.dialect.insert_executemany_returning
+ and len(records) > 1
+ ):
+ do_executemany = True
+ else:
+ do_executemany = False
+
+ if not has_all_defaults and base_mapper.eager_defaults:
+ statement = statement.return_defaults()
+ elif mapper.version_id_col is not None:
+ statement = statement.return_defaults(mapper.version_id_col)
+ elif do_executemany:
+ statement = statement.return_defaults(*table.primary_key)
+
+ if do_executemany:
+ multiparams = [rec[2] for rec in records]
+
+ c = connection._execute_20(
+ statement, multiparams, execution_options=execution_options
+ )
+
+ if bookkeeping:
+ for (
+ (
+ state,
+ state_dict,
+ params,
+ mapper_rec,
+ conn,
+ value_params,
+ has_all_pks,
+ has_all_defaults,
+ ),
+ last_inserted_params,
+ inserted_primary_key,
+ returned_defaults,
+ ) in util.zip_longest(
+ records,
+ c.context.compiled_parameters,
+ c.inserted_primary_key_rows,
+ c.returned_defaults_rows or (),
+ ):
+ if inserted_primary_key is None:
+ # this is a real problem and means that we didn't
+ # get back as many PK rows. we can't continue
+ # since this indicates PK rows were missing, which
+ # means we likely mis-populated records starting
+ # at that point with incorrectly matched PK
+ # values.
+ raise orm_exc.FlushError(
+ "Multi-row INSERT statement for %s did not "
+ "produce "
+ "the correct number of INSERTed rows for "
+ "RETURNING. Ensure there are no triggers or "
+ "special driver issues preventing INSERT from "
+ "functioning properly." % mapper_rec
+ )
+
+ for pk, col in zip(
+ inserted_primary_key,
+ mapper._pks_by_table[table],
+ ):
+ prop = mapper_rec._columntoproperty[col]
+ if state_dict.get(prop.key) is None:
+ state_dict[prop.key] = pk
+
+ if state:
+ _postfetch(
+ mapper_rec,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ last_inserted_params,
+ value_params,
+ False,
+ returned_defaults,
+ )
+ else:
+ _postfetch_bulk_save(mapper_rec, state_dict, table)
+ else:
+ for (
+ state,
+ state_dict,
+ params,
+ mapper_rec,
+ connection,
+ value_params,
+ has_all_pks,
+ has_all_defaults,
+ ) in records:
+ if value_params:
+ result = connection._execute_20(
+ statement.values(value_params),
+ params,
+ execution_options=execution_options,
+ )
+ else:
+ result = connection._execute_20(
+ statement,
+ params,
+ execution_options=execution_options,
+ )
+
+ primary_key = result.inserted_primary_key
+ if primary_key is None:
+ raise orm_exc.FlushError(
+ "Single-row INSERT statement for %s "
+ "did not produce a "
+ "new primary key result "
+ "being invoked. Ensure there are no triggers or "
+ "special driver issues preventing INSERT from "
+ "functioning properly." % (mapper_rec,)
+ )
+ for pk, col in zip(
+ primary_key, mapper._pks_by_table[table]
+ ):
+ prop = mapper_rec._columntoproperty[col]
+ if (
+ col in value_params
+ or state_dict.get(prop.key) is None
+ ):
+ state_dict[prop.key] = pk
+ if bookkeeping:
+ if state:
+ _postfetch(
+ mapper_rec,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ result,
+ result.context.compiled_parameters[0],
+ value_params,
+ False,
+ result.returned_defaults
+ if not result.context.executemany
+ else None,
+ )
+ else:
+ _postfetch_bulk_save(mapper_rec, state_dict, table)
+
+
+def _emit_post_update_statements(
+ base_mapper, uowtransaction, mapper, table, update
+):
+ """Emit UPDATE statements corresponding to value lists collected
+ by _collect_post_update_commands()."""
+
+ execution_options = {"compiled_cache": base_mapper._compiled_cache}
+
+ needs_version_id = (
+ mapper.version_id_col is not None
+ and mapper.version_id_col in mapper._cols_by_table[table]
+ )
+
+ def update_stmt():
+ clauses = BooleanClauseList._construct_raw(operators.and_)
+
+ for col in mapper._pks_by_table[table]:
+ clauses.clauses.append(
+ col == sql.bindparam(col._label, type_=col.type)
+ )
+
+ if needs_version_id:
+ clauses.clauses.append(
+ mapper.version_id_col
+ == sql.bindparam(
+ mapper.version_id_col._label,
+ type_=mapper.version_id_col.type,
+ )
+ )
+
+ stmt = table.update().where(clauses)
+
+ if mapper.version_id_col is not None:
+ stmt = stmt.return_defaults(mapper.version_id_col)
+
+ return stmt
+
+ statement = base_mapper._memo(("post_update", table), update_stmt)
+
+    # execute each UPDATE in the order of the original list of states
+    # to guarantee row access order, but also group them into common
+    # (connection, cols) sets to support executemany().
+ for key, records in groupby(
+ update,
+        lambda rec: (rec[3], set(rec[4])),  # (connection, parameter keys)
+ ):
+ rows = 0
+
+ records = list(records)
+ connection = key[0]
+
+ assert_singlerow = (
+ connection.dialect.supports_sane_rowcount
+ if mapper.version_id_col is None
+ else connection.dialect.supports_sane_rowcount_returning
+ )
+ assert_multirow = (
+ assert_singlerow
+ and connection.dialect.supports_sane_multi_rowcount
+ )
+ allow_multirow = not needs_version_id or assert_multirow
+
+ if not allow_multirow:
+ check_rowcount = assert_singlerow
+ for state, state_dict, mapper_rec, connection, params in records:
+
+ c = connection._execute_20(
+ statement, params, execution_options=execution_options
+ )
+
+ _postfetch_post_update(
+ mapper_rec,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ )
+ rows += c.rowcount
+ else:
+ multiparams = [
+ params
+ for state, state_dict, mapper_rec, conn, params in records
+ ]
+
+ check_rowcount = assert_multirow or (
+ assert_singlerow and len(multiparams) == 1
+ )
+
+ c = connection._execute_20(
+ statement, multiparams, execution_options=execution_options
+ )
+
+ rows += c.rowcount
+ for state, state_dict, mapper_rec, connection, params in records:
+ _postfetch_post_update(
+ mapper_rec,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ )
+
+ if check_rowcount:
+ if rows != len(records):
+ raise orm_exc.StaleDataError(
+ "UPDATE statement on table '%s' expected to "
+ "update %d row(s); %d were matched."
+ % (table.description, len(records), rows)
+ )
+
+ elif needs_version_id:
+ util.warn(
+ "Dialect %s does not support updated rowcount "
+ "- versioning cannot be verified."
+ % c.dialect.dialect_description
+ )
+
+
+def _emit_delete_statements(
+ base_mapper, uowtransaction, mapper, table, delete
+):
+ """Emit DELETE statements corresponding to value lists collected
+ by _collect_delete_commands()."""
+
+ need_version_id = (
+ mapper.version_id_col is not None
+ and mapper.version_id_col in mapper._cols_by_table[table]
+ )
+
+ def delete_stmt():
+ clauses = BooleanClauseList._construct_raw(operators.and_)
+
+ for col in mapper._pks_by_table[table]:
+ clauses.clauses.append(
+ col == sql.bindparam(col.key, type_=col.type)
+ )
+
+ if need_version_id:
+ clauses.clauses.append(
+ mapper.version_id_col
+ == sql.bindparam(
+ mapper.version_id_col.key, type_=mapper.version_id_col.type
+ )
+ )
+
+ return table.delete().where(clauses)
+
+ statement = base_mapper._memo(("delete", table), delete_stmt)
+ for connection, recs in groupby(delete, lambda rec: rec[1]): # connection
+ del_objects = [params for params, connection in recs]
+
+ execution_options = {"compiled_cache": base_mapper._compiled_cache}
+ expected = len(del_objects)
+ rows_matched = -1
+ only_warn = False
+
+ if (
+ need_version_id
+ and not connection.dialect.supports_sane_multi_rowcount
+ ):
+ if connection.dialect.supports_sane_rowcount:
+ rows_matched = 0
+ # execute deletes individually so that versioned
+ # rows can be verified
+ for params in del_objects:
+
+ c = connection._execute_20(
+ statement, params, execution_options=execution_options
+ )
+ rows_matched += c.rowcount
+ else:
+ util.warn(
+ "Dialect %s does not support deleted rowcount "
+ "- versioning cannot be verified."
+ % connection.dialect.dialect_description
+ )
+ connection._execute_20(
+ statement, del_objects, execution_options=execution_options
+ )
+ else:
+ c = connection._execute_20(
+ statement, del_objects, execution_options=execution_options
+ )
+
+ if not need_version_id:
+ only_warn = True
+
+ rows_matched = c.rowcount
+
+ if (
+ base_mapper.confirm_deleted_rows
+ and rows_matched > -1
+ and expected != rows_matched
+ and (
+ connection.dialect.supports_sane_multi_rowcount
+ or len(del_objects) == 1
+ )
+ ):
+ # TODO: why does this "only warn" if versioning is turned off,
+ # whereas the UPDATE raises?
+ if only_warn:
+ util.warn(
+ "DELETE statement on table '%s' expected to "
+ "delete %d row(s); %d were matched. Please set "
+ "confirm_deleted_rows=False within the mapper "
+ "configuration to prevent this warning."
+ % (table.description, expected, rows_matched)
+ )
+ else:
+ raise orm_exc.StaleDataError(
+ "DELETE statement on table '%s' expected to "
+ "delete %d row(s); %d were matched. Please set "
+ "confirm_deleted_rows=False within the mapper "
+ "configuration to prevent this warning."
+ % (table.description, expected, rows_matched)
+ )
+
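+# The rowcount check above can be disabled per-mapper; a minimal
+# sketch, assuming a hypothetical mapped class::
+#
+#     class User(Base):
+#         __tablename__ = "user"
+#         id = Column(Integer, primary_key=True)
+#         __mapper_args__ = {"confirm_deleted_rows": False}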
+
+def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
+ """finalize state on states that have been inserted or updated,
+ including calling after_insert/after_update events.
+
+ """
+ for state, state_dict, mapper, connection, has_identity in states:
+
+ if mapper._readonly_props:
+ readonly = state.unmodified_intersection(
+ [
+ p.key
+ for p in mapper._readonly_props
+ if (
+ p.expire_on_flush
+ and (not p.deferred or p.key in state.dict)
+ )
+ or (
+ not p.expire_on_flush
+ and not p.deferred
+ and p.key not in state.dict
+ )
+ ]
+ )
+ if readonly:
+ state._expire_attributes(state.dict, readonly)
+
+ # if eager_defaults option is enabled, load
+ # all expired cols. Else if we have a version_id_col, make sure
+ # it isn't expired.
+ toload_now = []
+
+ if base_mapper.eager_defaults:
+ toload_now.extend(
+ state._unloaded_non_object.intersection(
+ mapper._server_default_plus_onupdate_propkeys
+ )
+ )
+
+ if (
+ mapper.version_id_col is not None
+ and mapper.version_id_generator is False
+ ):
+ if mapper._version_id_prop.key in state.unloaded:
+ toload_now.extend([mapper._version_id_prop.key])
+
+ if toload_now:
+ state.key = base_mapper._identity_key_from_state(state)
+ stmt = future.select(mapper).set_label_style(
+ LABEL_STYLE_TABLENAME_PLUS_COL
+ )
+ loading.load_on_ident(
+ uowtransaction.session,
+ stmt,
+ state.key,
+ refresh_state=state,
+ only_load_props=toload_now,
+ )
+
+ # call after_XXX extensions
+ if not has_identity:
+ mapper.dispatch.after_insert(mapper, connection, state)
+ else:
+ mapper.dispatch.after_update(mapper, connection, state)
+
+ if (
+ mapper.version_id_generator is False
+ and mapper.version_id_col is not None
+ ):
+ if state_dict[mapper._version_id_prop.key] is None:
+ raise orm_exc.FlushError(
+ "Instance does not contain a non-NULL version value"
+ )
+
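+# The version checks above are driven by the version_id_col mapper
+# setting; a minimal sketch, assuming a hypothetical mapped class::
+#
+#     class Item(Base):
+#         __tablename__ = "item"
+#         id = Column(Integer, primary_key=True)
+#         version = Column(Integer, nullable=False)
+#         __mapper_args__ = {"version_id_col": version}
+#
+# adding "version_id_generator": False to __mapper_args__ means the
+# application supplies version values itself; the check above then
+# ensures a non-NULL value is present after flush.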
+
+def _postfetch_post_update(
+ mapper, uowtransaction, table, state, dict_, result, params
+):
+ if uowtransaction.is_deleted(state):
+ return
+
+ prefetch_cols = result.context.compiled.prefetch
+ postfetch_cols = result.context.compiled.postfetch
+
+ if (
+ mapper.version_id_col is not None
+ and mapper.version_id_col in mapper._cols_by_table[table]
+ ):
+ prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
+
+ refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
+ if refresh_flush:
+ load_evt_attrs = []
+
+ for c in prefetch_cols:
+ if c.key in params and c in mapper._columntoproperty:
+ dict_[mapper._columntoproperty[c].key] = params[c.key]
+ if refresh_flush:
+ load_evt_attrs.append(mapper._columntoproperty[c].key)
+
+ if refresh_flush and load_evt_attrs:
+ mapper.class_manager.dispatch.refresh_flush(
+ state, uowtransaction, load_evt_attrs
+ )
+
+ if postfetch_cols:
+ state._expire_attributes(
+ state.dict,
+ [
+ mapper._columntoproperty[c].key
+ for c in postfetch_cols
+ if c in mapper._columntoproperty
+ ],
+ )
+
+
+def _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ dict_,
+ result,
+ params,
+ value_params,
+ isupdate,
+ returned_defaults,
+):
+ """Expire attributes in need of newly persisted database state,
+ after an INSERT or UPDATE statement has proceeded for that
+ state."""
+
+ prefetch_cols = result.context.compiled.prefetch
+ postfetch_cols = result.context.compiled.postfetch
+ returning_cols = result.context.compiled.returning
+
+ if (
+ mapper.version_id_col is not None
+ and mapper.version_id_col in mapper._cols_by_table[table]
+ ):
+ prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
+
+ refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
+ if refresh_flush:
+ load_evt_attrs = []
+
+ if returning_cols:
+ row = returned_defaults
+ if row is not None:
+ for row_value, col in zip(row, returning_cols):
+ # pk cols returned from insert are handled
+ # distinctly, don't step on the values here
+ if col.primary_key and result.context.isinsert:
+ continue
+
+                # note that the "returned defaults" row can include columns
+                # that are not mapped to this mapper, typically because they
+                # are "excluded", which can be specified directly or also
+                # occurs when using declarative w/ single table inheritance
+ prop = mapper._columntoproperty.get(col)
+ if prop:
+ dict_[prop.key] = row_value
+ if refresh_flush:
+ load_evt_attrs.append(prop.key)
+
+ for c in prefetch_cols:
+ if c.key in params and c in mapper._columntoproperty:
+ dict_[mapper._columntoproperty[c].key] = params[c.key]
+ if refresh_flush:
+ load_evt_attrs.append(mapper._columntoproperty[c].key)
+
+ if refresh_flush and load_evt_attrs:
+ mapper.class_manager.dispatch.refresh_flush(
+ state, uowtransaction, load_evt_attrs
+ )
+
+ if isupdate and value_params:
+ # explicitly suit the use case specified by
+ # [ticket:3801], PK SQL expressions for UPDATE on non-RETURNING
+ # database which are set to themselves in order to do a version bump.
+ postfetch_cols.extend(
+ [
+ col
+ for col in value_params
+ if col.primary_key and col not in returning_cols
+ ]
+ )
+
+ if postfetch_cols:
+ state._expire_attributes(
+ state.dict,
+ [
+ mapper._columntoproperty[c].key
+ for c in postfetch_cols
+ if c in mapper._columntoproperty
+ ],
+ )
+
+ # synchronize newly inserted ids from one table to the next
+ # TODO: this still goes a little too often. would be nice to
+ # have definitive list of "columns that changed" here
+ for m, equated_pairs in mapper._table_to_equated[table]:
+ sync.populate(
+ state,
+ m,
+ state,
+ m,
+ equated_pairs,
+ uowtransaction,
+ mapper.passive_updates,
+ )
+
+
+def _postfetch_bulk_save(mapper, dict_, table):
+ for m, equated_pairs in mapper._table_to_equated[table]:
+ sync.bulk_populate_inherit_keys(dict_, m, equated_pairs)
+
+
+def _connections_for_states(base_mapper, uowtransaction, states):
+ """Return an iterator of (state, state.dict, mapper, connection).
+
+ The states are sorted according to _sort_states, then paired
+ with the connection they should be using for the given
+ unit of work transaction.
+
+ """
+ # if session has a connection callable,
+ # organize individual states with the connection
+ # to use for update
+ if uowtransaction.session.connection_callable:
+ connection_callable = uowtransaction.session.connection_callable
+ else:
+ connection = uowtransaction.transaction.connection(base_mapper)
+ connection_callable = None
+
+ for state in _sort_states(base_mapper, states):
+ if connection_callable:
+ connection = connection_callable(base_mapper, state.obj())
+
+ mapper = state.manager.mapper
+
+ yield state, state.dict, mapper, connection
+
+
+def _sort_states(mapper, states):
+ pending = set(states)
+ persistent = set(s for s in pending if s.key is not None)
+ pending.difference_update(persistent)
+
+ try:
+ persistent_sorted = sorted(
+ persistent, key=mapper._persistent_sortkey_fn
+ )
+ except TypeError as err:
+ util.raise_(
+ sa_exc.InvalidRequestError(
+ "Could not sort objects by primary key; primary key "
+ "values must be sortable in Python (was: %s)" % err
+ ),
+ replace_context=err,
+ )
+ return (
+ sorted(pending, key=operator.attrgetter("insert_order"))
+ + persistent_sorted
+ )
+
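+# Illustrative sketch: pending objects flush in the order they were
+# added to the Session, followed by persistent objects ordered by
+# primary key (assuming a hypothetical mapped class ``User``)::
+#
+#     a, b = User(), User()
+#     session.add(a)  # a.insert_order < b.insert_order
+#     session.add(b)
+#     session.flush()  # a is processed before b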
+
+_EMPTY_DICT = util.immutabledict()
+
+
+class BulkUDCompileState(CompileState):
+ class default_update_options(Options):
+ _synchronize_session = "evaluate"
+ _autoflush = True
+ _subject_mapper = None
+ _resolved_values = _EMPTY_DICT
+ _resolved_keys_as_propnames = _EMPTY_DICT
+ _value_evaluators = _EMPTY_DICT
+ _matched_objects = None
+ _matched_rows = None
+ _refresh_identity_token = None
+
+ @classmethod
+ def orm_pre_session_exec(
+ cls,
+ session,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ is_reentrant_invoke,
+ ):
+ if is_reentrant_invoke:
+ return statement, execution_options
+
+ (
+ update_options,
+ execution_options,
+ ) = BulkUDCompileState.default_update_options.from_execution_options(
+ "_sa_orm_update_options",
+ {"synchronize_session"},
+ execution_options,
+ statement._execution_options,
+ )
+
+ sync = update_options._synchronize_session
+ if sync is not None:
+ if sync not in ("evaluate", "fetch", False):
+ raise sa_exc.ArgumentError(
+ "Valid strategies for session synchronization "
+ "are 'evaluate', 'fetch', False"
+ )
+
+ bind_arguments["clause"] = statement
+ try:
+ plugin_subject = statement._propagate_attrs["plugin_subject"]
+ except KeyError:
+ assert False, "statement had 'orm' plugin but no plugin_subject"
+ else:
+ bind_arguments["mapper"] = plugin_subject.mapper
+
+ update_options += {"_subject_mapper": plugin_subject.mapper}
+
+ if update_options._autoflush:
+ session._autoflush()
+
+ statement = statement._annotate(
+ {"synchronize_session": update_options._synchronize_session}
+ )
+
+        # this stage of the execution is called before the do_orm_execute
+        # event hook, meaning that for an extension like horizontal sharding,
+        # this step happens before the extension splits out into multiple
+        # backends and runs only once. if we do pre_sync_fetch, we execute
+        # a SELECT statement, which the horizontal sharding extension splits
+        # amongst the shards and combines the results together.
+
+ if update_options._synchronize_session == "evaluate":
+ update_options = cls._do_pre_synchronize_evaluate(
+ session,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ update_options,
+ )
+ elif update_options._synchronize_session == "fetch":
+ update_options = cls._do_pre_synchronize_fetch(
+ session,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ update_options,
+ )
+
+ return (
+ statement,
+ util.immutabledict(execution_options).union(
+ {"_sa_orm_update_options": update_options}
+ ),
+ )
+
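+    # The synchronize_session strategy consumed above is passed as an
+    # execution option on an ORM-enabled UPDATE or DELETE; a minimal
+    # sketch, assuming a hypothetical mapped class ``User``::
+    #
+    #     from sqlalchemy import update
+    #
+    #     session.execute(
+    #         update(User)
+    #         .where(User.name == "squidward")
+    #         .values(name="spongebob"),
+    #         execution_options={"synchronize_session": "fetch"},
+    #     )
+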
+ @classmethod
+ def orm_setup_cursor_result(
+ cls,
+ session,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ result,
+ ):
+
+        # this stage of the execution is called after the
+        # do_orm_execute event hook, meaning that for an extension like
+        # horizontal sharding, this step happens *within* the horizontal
+ # sharding event handler which calls session.execute() re-entrantly
+ # and will occur for each backend individually.
+ # the sharding extension then returns its own merged result from the
+ # individual ones we return here.
+
+ update_options = execution_options["_sa_orm_update_options"]
+ if update_options._synchronize_session == "evaluate":
+ cls._do_post_synchronize_evaluate(session, result, update_options)
+ elif update_options._synchronize_session == "fetch":
+ cls._do_post_synchronize_fetch(session, result, update_options)
+
+ return result
+
+ @classmethod
+ def _adjust_for_extra_criteria(cls, global_attributes, ext_info):
+ """Apply extra criteria filtering.
+
+ For all distinct single-table-inheritance mappers represented in the
+ table being updated or deleted, produce additional WHERE criteria such
+ that only the appropriate subtypes are selected from the total results.
+
+ Additionally, add WHERE criteria originating from LoaderCriteriaOptions
+ collected from the statement.
+
+ """
+
+ return_crit = ()
+
+ adapter = ext_info._adapter if ext_info.is_aliased_class else None
+
+ if (
+ "additional_entity_criteria",
+ ext_info.mapper,
+ ) in global_attributes:
+ return_crit += tuple(
+ ae._resolve_where_criteria(ext_info)
+ for ae in global_attributes[
+ ("additional_entity_criteria", ext_info.mapper)
+ ]
+ if ae.include_aliases or ae.entity is ext_info
+ )
+
+ if ext_info.mapper._single_table_criterion is not None:
+ return_crit += (ext_info.mapper._single_table_criterion,)
+
+ if adapter:
+ return_crit = tuple(adapter.traverse(crit) for crit in return_crit)
+
+ return return_crit
+
+ @classmethod
+ def _do_pre_synchronize_evaluate(
+ cls,
+ session,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ update_options,
+ ):
+ mapper = update_options._subject_mapper
+ target_cls = mapper.class_
+
+ value_evaluators = resolved_keys_as_propnames = _EMPTY_DICT
+
+ try:
+ evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
+ crit = ()
+ if statement._where_criteria:
+ crit += statement._where_criteria
+
+ global_attributes = {}
+ for opt in statement._with_options:
+ if opt._is_criteria_option:
+ opt.get_global_criteria(global_attributes)
+
+ if global_attributes:
+ crit += cls._adjust_for_extra_criteria(
+ global_attributes, mapper
+ )
+
+ if crit:
+ eval_condition = evaluator_compiler.process(*crit)
+ else:
+
+ def eval_condition(obj):
+ return True
+
+ except evaluator.UnevaluatableError as err:
+ util.raise_(
+ sa_exc.InvalidRequestError(
+ 'Could not evaluate current criteria in Python: "%s". '
+ "Specify 'fetch' or False for the "
+ "synchronize_session execution option." % err
+ ),
+ from_=err,
+ )
+
+ if statement.__visit_name__ == "lambda_element":
+ # ._resolved is called on every LambdaElement in order to
+ # generate the cache key, so this access does not add
+ # additional expense
+ effective_statement = statement._resolved
+ else:
+ effective_statement = statement
+
+ if effective_statement.__visit_name__ == "update":
+ resolved_values = cls._get_resolved_values(
+ mapper, effective_statement
+ )
+ value_evaluators = {}
+ resolved_keys_as_propnames = cls._resolved_keys_as_propnames(
+ mapper, resolved_values
+ )
+ for key, value in resolved_keys_as_propnames:
+ try:
+ _evaluator = evaluator_compiler.process(
+ coercions.expect(roles.ExpressionElementRole, value)
+ )
+ except evaluator.UnevaluatableError:
+ pass
+ else:
+ value_evaluators[key] = _evaluator
+
+ # TODO: detect when the where clause is a trivial primary key match.
+ matched_objects = [
+ state.obj()
+ for state in session.identity_map.all_states()
+ if state.mapper.isa(mapper)
+ and not state.expired
+ and eval_condition(state.obj())
+ and (
+ update_options._refresh_identity_token is None
+ # TODO: coverage for the case where horizontal sharding
+ # invokes an update() or delete() given an explicit identity
+ # token up front
+ or state.identity_token
+ == update_options._refresh_identity_token
+ )
+ ]
+ return update_options + {
+ "_matched_objects": matched_objects,
+ "_value_evaluators": value_evaluators,
+ "_resolved_keys_as_propnames": resolved_keys_as_propnames,
+ }
+
+ @classmethod
+ def _get_resolved_values(cls, mapper, statement):
+ if statement._multi_values:
+ return []
+ elif statement._ordered_values:
+ return list(statement._ordered_values)
+ elif statement._values:
+ return list(statement._values.items())
+ else:
+ return []
+
+ @classmethod
+ def _resolved_keys_as_propnames(cls, mapper, resolved_values):
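+        # normalize .values() keys into mapped property names, e.g.
+        # (User.name, "x") or (user_table.c.name, "x") -> ("name", "x");
+        # ``User`` / ``user_table`` are illustrative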
+ values = []
+ for k, v in resolved_values:
+ if isinstance(k, attributes.QueryableAttribute):
+ values.append((k.key, v))
+ continue
+ elif hasattr(k, "__clause_element__"):
+ k = k.__clause_element__()
+
+ if mapper and isinstance(k, expression.ColumnElement):
+ try:
+ attr = mapper._columntoproperty[k]
+ except orm_exc.UnmappedColumnError:
+ pass
+ else:
+ values.append((attr.key, v))
+ else:
+ raise sa_exc.InvalidRequestError(
+ "Invalid expression type: %r" % k
+ )
+ return values
+
+ @classmethod
+ def _do_pre_synchronize_fetch(
+ cls,
+ session,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ update_options,
+ ):
+ mapper = update_options._subject_mapper
+
+ select_stmt = (
+ select(*(mapper.primary_key + (mapper.select_identity_token,)))
+ .select_from(mapper)
+ .options(*statement._with_options)
+ )
+ select_stmt._where_criteria = statement._where_criteria
+
+ def skip_for_full_returning(orm_context):
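+            # when the backend supports RETURNING for a full UPDATE/DELETE
+            # statement, the DML statement itself will report the matched
+            # primary keys, so suppress this pre-SELECT via a null result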
+ bind = orm_context.session.get_bind(**orm_context.bind_arguments)
+ if bind.dialect.full_returning:
+ return _result.null_result()
+ else:
+ return None
+
+ result = session.execute(
+ select_stmt,
+ params,
+ execution_options,
+ bind_arguments,
+ _add_event=skip_for_full_returning,
+ )
+ matched_rows = result.fetchall()
+
+ value_evaluators = _EMPTY_DICT
+
+ if statement.__visit_name__ == "lambda_element":
+ # ._resolved is called on every LambdaElement in order to
+ # generate the cache key, so this access does not add
+ # additional expense
+ effective_statement = statement._resolved
+ else:
+ effective_statement = statement
+
+ if effective_statement.__visit_name__ == "update":
+ target_cls = mapper.class_
+ evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
+ resolved_values = cls._get_resolved_values(
+ mapper, effective_statement
+ )
+            resolved_keys_as_propnames = cls._resolved_keys_as_propnames(
+                mapper, resolved_values
+            )
+ value_evaluators = {}
+ for key, value in resolved_keys_as_propnames:
+ try:
+ _evaluator = evaluator_compiler.process(
+ coercions.expect(roles.ExpressionElementRole, value)
+ )
+ except evaluator.UnevaluatableError:
+ pass
+ else:
+ value_evaluators[key] = _evaluator
+
+ else:
+ resolved_keys_as_propnames = _EMPTY_DICT
+
+ return update_options + {
+ "_value_evaluators": value_evaluators,
+ "_matched_rows": matched_rows,
+ "_resolved_keys_as_propnames": resolved_keys_as_propnames,
+ }
+
+
+class ORMDMLState:
+ @classmethod
+ def get_entity_description(cls, statement):
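+        # returns, roughly, for an update(User) statement
+        # (``User`` being an illustrative mapped class):
+        #   {"name": "User", "type": User, "expr": User,
+        #    "entity": User, "table": user_table}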
+ ext_info = statement.table._annotations["parententity"]
+ mapper = ext_info.mapper
+ if ext_info.is_aliased_class:
+ _label_name = ext_info.name
+ else:
+ _label_name = mapper.class_.__name__
+
+ return {
+ "name": _label_name,
+ "type": mapper.class_,
+ "expr": ext_info.entity,
+ "entity": ext_info.entity,
+ "table": mapper.local_table,
+ }
+
+ @classmethod
+ def get_returning_column_descriptions(cls, statement):
+ def _ent_for_col(c):
+ return c._annotations.get("parententity", None)
+
+ def _attr_for_col(c, ent):
+ if ent is None:
+ return c
+ proxy_key = c._annotations.get("proxy_key", None)
+ if not proxy_key:
+ return c
+ else:
+ return getattr(ent.entity, proxy_key, c)
+
+ return [
+ {
+ "name": c.key,
+ "type": c.type,
+ "expr": _attr_for_col(c, ent),
+ "aliased": ent.is_aliased_class,
+ "entity": ent.entity,
+ }
+ for c, ent in [
+ (c, _ent_for_col(c)) for c in statement._all_selected_columns
+ ]
+ ]
+
+
+@CompileState.plugin_for("orm", "insert")
+class ORMInsert(ORMDMLState, InsertDMLState):
+ @classmethod
+ def orm_pre_session_exec(
+ cls,
+ session,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ is_reentrant_invoke,
+ ):
+ bind_arguments["clause"] = statement
+ try:
+ plugin_subject = statement._propagate_attrs["plugin_subject"]
+ except KeyError:
+ assert False, "statement had 'orm' plugin but no plugin_subject"
+ else:
+ bind_arguments["mapper"] = plugin_subject.mapper
+
+ return (
+ statement,
+ util.immutabledict(execution_options),
+ )
+
+ @classmethod
+ def orm_setup_cursor_result(
+ cls,
+ session,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ result,
+ ):
+ return result
+
+
+@CompileState.plugin_for("orm", "update")
+class BulkORMUpdate(ORMDMLState, UpdateDMLState, BulkUDCompileState):
+ @classmethod
+ def create_for_statement(cls, statement, compiler, **kw):
+
+ self = cls.__new__(cls)
+
+ ext_info = statement.table._annotations["parententity"]
+
+ self.mapper = mapper = ext_info.mapper
+
+ self.extra_criteria_entities = {}
+
+ self._resolved_values = cls._get_resolved_values(mapper, statement)
+
+ extra_criteria_attributes = {}
+
+ for opt in statement._with_options:
+ if opt._is_criteria_option:
+ opt.get_global_criteria(extra_criteria_attributes)
+
+ if not statement._preserve_parameter_order and statement._values:
+ self._resolved_values = dict(self._resolved_values)
+
+ new_stmt = sql.Update.__new__(sql.Update)
+ new_stmt.__dict__.update(statement.__dict__)
+ new_stmt.table = mapper.local_table
+
+ # note if the statement has _multi_values, these
+ # are passed through to the new statement, which will then raise
+ # InvalidRequestError because UPDATE doesn't support multi_values
+ # right now.
+ if statement._ordered_values:
+ new_stmt._ordered_values = self._resolved_values
+ elif statement._values:
+ new_stmt._values = self._resolved_values
+
+ new_crit = cls._adjust_for_extra_criteria(
+ extra_criteria_attributes, mapper
+ )
+ if new_crit:
+ new_stmt = new_stmt.where(*new_crit)
+
+ # if we are against a lambda statement we might not be the
+ # topmost object that received per-execute annotations
+
+ if (
+ compiler._annotations.get("synchronize_session", None) == "fetch"
+ and compiler.dialect.full_returning
+ ):
+ if new_stmt._returning:
+ raise sa_exc.InvalidRequestError(
+ "Can't use synchronize_session='fetch' "
+ "with explicit returning()"
+ )
+ new_stmt = new_stmt.returning(*mapper.primary_key)
+
+ UpdateDMLState.__init__(self, new_stmt, compiler, **kw)
+
+ return self
+
+ @classmethod
+ def _get_crud_kv_pairs(cls, statement, kv_iterator):
+ plugin_subject = statement._propagate_attrs["plugin_subject"]
+
+ core_get_crud_kv_pairs = UpdateDMLState._get_crud_kv_pairs
+
+ if not plugin_subject or not plugin_subject.mapper:
+ return core_get_crud_kv_pairs(statement, kv_iterator)
+
+ mapper = plugin_subject.mapper
+
+ values = []
+
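+        # keys may arrive as plain strings ("name"), instrumented
+        # attributes (e.g. an illustrative User.name), or annotated column
+        # expressions; descriptors such as composites expand through
+        # _bulk_update_tuples() into plain column/value pairs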
+ for k, v in kv_iterator:
+ k = coercions.expect(roles.DMLColumnRole, k)
+
+ if isinstance(k, util.string_types):
+ desc = _entity_namespace_key(mapper, k, default=NO_VALUE)
+ if desc is NO_VALUE:
+ values.append(
+ (
+ k,
+ coercions.expect(
+ roles.ExpressionElementRole,
+ v,
+ type_=sqltypes.NullType(),
+ is_crud=True,
+ ),
+ )
+ )
+ else:
+ values.extend(
+ core_get_crud_kv_pairs(
+ statement, desc._bulk_update_tuples(v)
+ )
+ )
+ elif "entity_namespace" in k._annotations:
+ k_anno = k._annotations
+ attr = _entity_namespace_key(
+ k_anno["entity_namespace"], k_anno["proxy_key"]
+ )
+ values.extend(
+ core_get_crud_kv_pairs(
+ statement, attr._bulk_update_tuples(v)
+ )
+ )
+ else:
+ values.append(
+ (
+ k,
+ coercions.expect(
+ roles.ExpressionElementRole,
+ v,
+ type_=sqltypes.NullType(),
+ is_crud=True,
+ ),
+ )
+ )
+ return values
+
+ @classmethod
+ def _do_post_synchronize_evaluate(cls, session, result, update_options):
+
+ states = set()
+ evaluated_keys = list(update_options._value_evaluators.keys())
+ values = update_options._resolved_keys_as_propnames
+ attrib = set(k for k, v in values)
+ for obj in update_options._matched_objects:
+
+ state, dict_ = (
+ attributes.instance_state(obj),
+ attributes.instance_dict(obj),
+ )
+
+ # the evaluated states were gathered across all identity tokens.
+ # however the post_sync events are called per identity token,
+ # so filter.
+ if (
+ update_options._refresh_identity_token is not None
+ and state.identity_token
+ != update_options._refresh_identity_token
+ ):
+ continue
+
+ # only evaluate unmodified attributes
+ to_evaluate = state.unmodified.intersection(evaluated_keys)
+ for key in to_evaluate:
+ if key in dict_:
+ dict_[key] = update_options._value_evaluators[key](obj)
+
+ state.manager.dispatch.refresh(state, None, to_evaluate)
+
+ state._commit(dict_, list(to_evaluate))
+
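+            # attributes that were SET by the UPDATE but could not be
+            # evaluated in Python are expired, so the next access
+            # refreshes them from the database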
+ to_expire = attrib.intersection(dict_).difference(to_evaluate)
+ if to_expire:
+ state._expire_attributes(dict_, to_expire)
+
+ states.add(state)
+ session._register_altered(states)
+
+ @classmethod
+ def _do_post_synchronize_fetch(cls, session, result, update_options):
+ target_mapper = update_options._subject_mapper
+
+ states = set()
+ evaluated_keys = list(update_options._value_evaluators.keys())
+
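+        # with full RETURNING support, the UPDATE itself reported the
+        # matched primary keys; otherwise use the rows pre-selected by
+        # _do_pre_synchronize_fetch()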
+ if result.returns_rows:
+ matched_rows = [
+ tuple(row) + (update_options._refresh_identity_token,)
+ for row in result.all()
+ ]
+ else:
+ matched_rows = update_options._matched_rows
+
+ objs = [
+ session.identity_map[identity_key]
+ for identity_key in [
+ target_mapper.identity_key_from_primary_key(
+ list(primary_key),
+ identity_token=identity_token,
+ )
+ for primary_key, identity_token in [
+ (row[0:-1], row[-1]) for row in matched_rows
+ ]
+ if update_options._refresh_identity_token is None
+ or identity_token == update_options._refresh_identity_token
+ ]
+ if identity_key in session.identity_map
+ ]
+
+ values = update_options._resolved_keys_as_propnames
+ attrib = set(k for k, v in values)
+
+ for obj in objs:
+ state, dict_ = (
+ attributes.instance_state(obj),
+ attributes.instance_dict(obj),
+ )
+
+ to_evaluate = state.unmodified.intersection(evaluated_keys)
+ for key in to_evaluate:
+ if key in dict_:
+ dict_[key] = update_options._value_evaluators[key](obj)
+ state.manager.dispatch.refresh(state, None, to_evaluate)
+
+ state._commit(dict_, list(to_evaluate))
+
+ to_expire = attrib.intersection(dict_).difference(to_evaluate)
+ if to_expire:
+ state._expire_attributes(dict_, to_expire)
+
+ states.add(state)
+ session._register_altered(states)
+
+
+@CompileState.plugin_for("orm", "delete")
+class BulkORMDelete(ORMDMLState, DeleteDMLState, BulkUDCompileState):
+ @classmethod
+ def create_for_statement(cls, statement, compiler, **kw):
+ self = cls.__new__(cls)
+
+ ext_info = statement.table._annotations["parententity"]
+ self.mapper = mapper = ext_info.mapper
+
+ self.extra_criteria_entities = {}
+
+ extra_criteria_attributes = {}
+
+ for opt in statement._with_options:
+ if opt._is_criteria_option:
+ opt.get_global_criteria(extra_criteria_attributes)
+
+ new_crit = cls._adjust_for_extra_criteria(
+ extra_criteria_attributes, mapper
+ )
+ if new_crit:
+ statement = statement.where(*new_crit)
+
+ if (
+ mapper
+ and compiler._annotations.get("synchronize_session", None)
+ == "fetch"
+ and compiler.dialect.full_returning
+ ):
+ statement = statement.returning(*mapper.primary_key)
+
+ DeleteDMLState.__init__(self, statement, compiler, **kw)
+
+ return self
+
+ @classmethod
+ def _do_post_synchronize_evaluate(cls, session, result, update_options):
+
+ session._remove_newly_deleted(
+ [
+ attributes.instance_state(obj)
+ for obj in update_options._matched_objects
+ ]
+ )
+
+ @classmethod
+ def _do_post_synchronize_fetch(cls, session, result, update_options):
+ target_mapper = update_options._subject_mapper
+
+ if result.returns_rows:
+ matched_rows = [
+ tuple(row) + (update_options._refresh_identity_token,)
+ for row in result.all()
+ ]
+ else:
+ matched_rows = update_options._matched_rows
+
+ for row in matched_rows:
+ primary_key = row[0:-1]
+ identity_token = row[-1]
+
+ # TODO: inline this and call remove_newly_deleted
+ # once
+ identity_key = target_mapper.identity_key_from_primary_key(
+ list(primary_key),
+ identity_token=identity_token,
+ )
+ if identity_key in session.identity_map:
+ session._remove_newly_deleted(
+ [
+ attributes.instance_state(
+ session.identity_map[identity_key]
+ )
+ ]
+ )
diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py
new file mode 100644
index 0000000..d32af17
--- /dev/null
+++ b/lib/sqlalchemy/orm/properties.py
@@ -0,0 +1,430 @@
+# orm/properties.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""MapperProperty implementations.
+
+This is a private module which defines the behavior of individual ORM-
+mapped attributes.
+
+"""
+from __future__ import absolute_import
+
+from . import attributes
+from .descriptor_props import CompositeProperty
+from .descriptor_props import ConcreteInheritedProperty
+from .descriptor_props import SynonymProperty
+from .interfaces import PropComparator
+from .interfaces import StrategizedProperty
+from .relationships import RelationshipProperty
+from .. import log
+from .. import util
+from ..sql import coercions
+from ..sql import roles
+
+
+__all__ = [
+ "ColumnProperty",
+ "CompositeProperty",
+ "ConcreteInheritedProperty",
+ "RelationshipProperty",
+ "SynonymProperty",
+]
+
+
+@log.class_logger
+class ColumnProperty(StrategizedProperty):
+ """Describes an object attribute that corresponds to a table column.
+
+ Public constructor is the :func:`_orm.column_property` function.
+
+ """
+
+ strategy_wildcard_key = "column"
+ inherit_cache = True
+ _links_to_entity = False
+
+ __slots__ = (
+ "columns",
+ "group",
+ "deferred",
+ "instrument",
+ "comparator_factory",
+ "descriptor",
+ "active_history",
+ "expire_on_flush",
+ "info",
+ "doc",
+ "strategy_key",
+ "_creation_order",
+ "_is_polymorphic_discriminator",
+ "_mapped_by_synonym",
+ "_deferred_column_loader",
+ "_raise_column_loader",
+ "_renders_in_subqueries",
+ "raiseload",
+ )
+
+ def __init__(self, *columns, **kwargs):
+ r"""Provide a column-level property for use with a mapping.
+
+ Column-based properties can normally be applied to the mapper's
+ ``properties`` dictionary using the :class:`_schema.Column`
+ element directly.
+ Use this function when the given column is not directly present within
+ the mapper's selectable; examples include SQL expressions, functions,
+ and scalar SELECT queries.
+
+ The :func:`_orm.column_property` function returns an instance of
+ :class:`.ColumnProperty`.
+
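+        E.g., mapping a SQL expression composed of two mapped columns as a
+        read-only attribute::
+
+            class User(Base):
+                __tablename__ = 'user'
+
+                id = Column(Integer, primary_key=True)
+                firstname = Column(String(50))
+                lastname = Column(String(50))
+                fullname = column_property(firstname + " " + lastname)
+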
+ Columns that aren't present in the mapper's selectable won't be
+ persisted by the mapper and are effectively "read-only" attributes.
+
+        :param \*columns:
+ list of Column objects to be mapped.
+
+ :param active_history=False:
+ When ``True``, indicates that the "previous" value for a
+ scalar attribute should be loaded when replaced, if not
+ already loaded. Normally, history tracking logic for
+ simple non-primary-key scalar values only needs to be
+ aware of the "new" value in order to perform a flush. This
+ flag is available for applications that make use of
+ :func:`.attributes.get_history` or :meth:`.Session.is_modified`
+ which also need to know
+ the "previous" value of the attribute.
+
+ :param comparator_factory: a class which extends
+ :class:`.ColumnProperty.Comparator` which provides custom SQL
+ clause generation for comparison operations.
+
+ :param group:
+ a group name for this property when marked as deferred.
+
+ :param deferred:
+ when True, the column property is "deferred", meaning that
+ it does not load immediately, and is instead loaded when the
+ attribute is first accessed on an instance. See also
+ :func:`~sqlalchemy.orm.deferred`.
+
+ :param doc:
+ optional string that will be applied as the doc on the
+ class-bound descriptor.
+
+ :param expire_on_flush=True:
+ Disable expiry on flush. A column_property() which refers
+ to a SQL expression (and not a single table-bound column)
+ is considered to be a "read only" property; populating it
+ has no effect on the state of data, and it can only return
+ database state. For this reason a column_property()'s value
+ is expired whenever the parent object is involved in a
+ flush, that is, has any kind of "dirty" state within a flush.
+ Setting this parameter to ``False`` will have the effect of
+ leaving any existing value present after the flush proceeds.
+            Note however that the :class:`.Session` with default expiration
+            settings still expires
+            all attributes after a :meth:`.Session.commit` call.
+
+ :param info: Optional data dictionary which will be populated into the
+ :attr:`.MapperProperty.info` attribute of this object.
+
+ :param raiseload: if True, indicates the column should raise an error
+ when undeferred, rather than loading the value. This can be
+ altered at query time by using the :func:`.deferred` option with
+ raiseload=False.
+
+ .. versionadded:: 1.4
+
+ .. seealso::
+
+ :ref:`deferred_raiseload`
+
+ .. seealso::
+
+ :ref:`column_property_options` - to map columns while including
+ mapping options
+
+ :ref:`mapper_column_property_sql_expressions` - to map SQL
+ expressions
+
+ """
+ super(ColumnProperty, self).__init__()
+ self.columns = [
+ coercions.expect(roles.LabeledColumnExprRole, c) for c in columns
+ ]
+ self.group = kwargs.pop("group", None)
+ self.deferred = kwargs.pop("deferred", False)
+ self.raiseload = kwargs.pop("raiseload", False)
+ self.instrument = kwargs.pop("_instrument", True)
+ self.comparator_factory = kwargs.pop(
+ "comparator_factory", self.__class__.Comparator
+ )
+ self.descriptor = kwargs.pop("descriptor", None)
+ self.active_history = kwargs.pop("active_history", False)
+ self.expire_on_flush = kwargs.pop("expire_on_flush", True)
+
+ if "info" in kwargs:
+ self.info = kwargs.pop("info")
+
+ if "doc" in kwargs:
+ self.doc = kwargs.pop("doc")
+ else:
+ for col in reversed(self.columns):
+ doc = getattr(col, "doc", None)
+ if doc is not None:
+ self.doc = doc
+ break
+ else:
+ self.doc = None
+
+ if kwargs:
+ raise TypeError(
+ "%s received unexpected keyword argument(s): %s"
+ % (self.__class__.__name__, ", ".join(sorted(kwargs.keys())))
+ )
+
+ util.set_creation_order(self)
+
+ self.strategy_key = (
+ ("deferred", self.deferred),
+ ("instrument", self.instrument),
+ )
+ if self.raiseload:
+ self.strategy_key += (("raiseload", True),)
+
+ def _memoized_attr__renders_in_subqueries(self):
+ return ("deferred", True) not in self.strategy_key or (
+ self not in self.parent._readonly_props
+ )
+
+ @util.preload_module("sqlalchemy.orm.state", "sqlalchemy.orm.strategies")
+ def _memoized_attr__deferred_column_loader(self):
+ state = util.preloaded.orm_state
+ strategies = util.preloaded.orm_strategies
+ return state.InstanceState._instance_level_callable_processor(
+ self.parent.class_manager,
+ strategies.LoadDeferredColumns(self.key),
+ self.key,
+ )
+
+ @util.preload_module("sqlalchemy.orm.state", "sqlalchemy.orm.strategies")
+ def _memoized_attr__raise_column_loader(self):
+ state = util.preloaded.orm_state
+ strategies = util.preloaded.orm_strategies
+ return state.InstanceState._instance_level_callable_processor(
+ self.parent.class_manager,
+ strategies.LoadDeferredColumns(self.key, True),
+ self.key,
+ )
+
+ def __clause_element__(self):
+ """Allow the ColumnProperty to work in expression before it is turned
+ into an instrumented attribute.
+ """
+
+ return self.expression
+
+ @property
+ def expression(self):
+ """Return the primary column or expression for this ColumnProperty.
+
+ E.g.::
+
+ class File(Base):
+ # ...
+
+ name = Column(String(64))
+ extension = Column(String(8))
+ filename = column_property(name + '.' + extension)
+ path = column_property('C:/' + filename.expression)
+
+ .. seealso::
+
+ :ref:`mapper_column_property_sql_expressions_composed`
+
+ """
+ return self.columns[0]
+
+ def instrument_class(self, mapper):
+ if not self.instrument:
+ return
+
+ attributes.register_descriptor(
+ mapper.class_,
+ self.key,
+ comparator=self.comparator_factory(self, mapper),
+ parententity=mapper,
+ doc=self.doc,
+ )
+
+ def do_init(self):
+ super(ColumnProperty, self).do_init()
+
+ if len(self.columns) > 1 and set(self.parent.primary_key).issuperset(
+ self.columns
+ ):
+ util.warn(
+ (
+ "On mapper %s, primary key column '%s' is being combined "
+ "with distinct primary key column '%s' in attribute '%s'. "
+ "Use explicit properties to give each column its own "
+ "mapped attribute name."
+ )
+ % (self.parent, self.columns[1], self.columns[0], self.key)
+ )
+
+ def copy(self):
+ return ColumnProperty(
+ deferred=self.deferred,
+ group=self.group,
+ active_history=self.active_history,
+ *self.columns
+ )
+
+ def _getcommitted(
+ self, state, dict_, column, passive=attributes.PASSIVE_OFF
+ ):
+ return state.get_impl(self.key).get_committed_value(
+ state, dict_, passive=passive
+ )
+
+ def merge(
+ self,
+ session,
+ source_state,
+ source_dict,
+ dest_state,
+ dest_dict,
+ load,
+ _recursive,
+ _resolve_conflict_map,
+ ):
+ if not self.instrument:
+ return
+ elif self.key in source_dict:
+ value = source_dict[self.key]
+
+ if not load:
+ dest_dict[self.key] = value
+ else:
+ impl = dest_state.get_impl(self.key)
+ impl.set(dest_state, dest_dict, value, None)
+ elif dest_state.has_identity and self.key not in dest_dict:
+ dest_state._expire_attributes(
+ dest_dict, [self.key], no_loader=True
+ )
+
+ class Comparator(util.MemoizedSlots, PropComparator):
+ """Produce boolean, comparison, and other operators for
+ :class:`.ColumnProperty` attributes.
+
+ See the documentation for :class:`.PropComparator` for a brief
+ overview.
+
+ .. seealso::
+
+ :class:`.PropComparator`
+
+ :class:`.ColumnOperators`
+
+ :ref:`types_operators`
+
+ :attr:`.TypeEngine.comparator_factory`
+
+ """
+
+ __slots__ = "__clause_element__", "info", "expressions"
+
+ def _orm_annotate_column(self, column):
+ """annotate and possibly adapt a column to be returned
+ as the mapped-attribute exposed version of the column.
+
+ The column in this context needs to act as much like the
+ column in an ORM mapped context as possible, so includes
+ annotations to give hints to various ORM functions as to
+ the source entity of this column. It also adapts it
+ to the mapper's with_polymorphic selectable if one is
+ present.
+
+ """
+
+ pe = self._parententity
+ annotations = {
+ "entity_namespace": pe,
+ "parententity": pe,
+ "parentmapper": pe,
+ "proxy_key": self.prop.key,
+ }
+
+ col = column
+
+ # for a mapper with polymorphic_on and an adapter, return
+ # the column against the polymorphic selectable.
+ # see also orm.util._orm_downgrade_polymorphic_columns
+ # for the reverse operation.
+ if self._parentmapper._polymorphic_adapter:
+ mapper_local_col = col
+ col = self._parentmapper._polymorphic_adapter.traverse(col)
+
+ # this is a clue to the ORM Query etc. that this column
+ # was adapted to the mapper's polymorphic_adapter. the
+                # ORM uses this hint to know which column it's adapting.
+ annotations["adapt_column"] = mapper_local_col
+
+ return col._annotate(annotations)._set_propagate_attrs(
+ {"compile_state_plugin": "orm", "plugin_subject": pe}
+ )
+
+ def _memoized_method___clause_element__(self):
+ if self.adapter:
+ return self.adapter(self.prop.columns[0], self.prop.key)
+ else:
+ return self._orm_annotate_column(self.prop.columns[0])
+
+ def _memoized_attr_info(self):
+ """The .info dictionary for this attribute."""
+
+ ce = self.__clause_element__()
+ try:
+ return ce.info
+ except AttributeError:
+ return self.prop.info
+
+ def _memoized_attr_expressions(self):
+ """The full sequence of columns referenced by this
+ attribute, adjusted for any aliasing in progress.
+
+ .. versionadded:: 1.3.17
+
+ """
+ if self.adapter:
+ return [
+ self.adapter(col, self.prop.key)
+ for col in self.prop.columns
+ ]
+ else:
+ return [
+ self._orm_annotate_column(col) for col in self.prop.columns
+ ]
+
+ def _fallback_getattr(self, key):
+ """proxy attribute access down to the mapped column.
+
+ this allows user-defined comparison methods to be accessed.
+ """
+ return getattr(self.__clause_element__(), key)
+
+ def operate(self, op, *other, **kwargs):
+ return op(self.__clause_element__(), *other, **kwargs)
+
+ def reverse_operate(self, op, other, **kwargs):
+ col = self.__clause_element__()
+ return op(col._bind_param(op, other), col, **kwargs)
+
+ def __str__(self):
+ return str(self.parent.class_.__name__) + "." + self.key
diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py
new file mode 100644
index 0000000..99e4591
--- /dev/null
+++ b/lib/sqlalchemy/orm/query.py
@@ -0,0 +1,3508 @@
+# orm/query.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""The Query class and support.
+
+Defines the :class:`_query.Query` class, the central
+construct used by the ORM to construct database queries.
+
+The :class:`_query.Query` class should not be confused with the
+:class:`_expression.Select` class, which defines database
+SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
+``Select`` in that it returns ORM-mapped objects and interacts with an
+ORM session, whereas the ``Select`` construct interacts directly with the
+database to return iterable result sets.
+
+"""
+import itertools
+import operator
+import types
+
+from . import exc as orm_exc
+from . import interfaces
+from . import loading
+from . import util as orm_util
+from .base import _assertions
+from .context import _column_descriptions
+from .context import _legacy_determine_last_joined_entity
+from .context import _legacy_filter_by_entity_zero
+from .context import LABEL_STYLE_LEGACY_ORM
+from .context import ORMCompileState
+from .context import ORMFromStatementCompileState
+from .context import QueryContext
+from .interfaces import ORMColumnsClauseRole
+from .util import aliased
+from .util import AliasedClass
+from .util import object_mapper
+from .util import with_parent
+from .util import with_polymorphic
+from .. import exc as sa_exc
+from .. import inspect
+from .. import inspection
+from .. import log
+from .. import sql
+from .. import util
+from ..sql import coercions
+from ..sql import elements
+from ..sql import expression
+from ..sql import roles
+from ..sql import Select
+from ..sql import util as sql_util
+from ..sql import visitors
+from ..sql.annotation import SupportsCloneAnnotations
+from ..sql.base import _entity_namespace_key
+from ..sql.base import _generative
+from ..sql.base import Executable
+from ..sql.selectable import _MemoizedSelectEntities
+from ..sql.selectable import _SelectFromElements
+from ..sql.selectable import ForUpdateArg
+from ..sql.selectable import GroupedElement
+from ..sql.selectable import HasHints
+from ..sql.selectable import HasPrefixes
+from ..sql.selectable import HasSuffixes
+from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
+from ..sql.selectable import SelectBase
+from ..sql.selectable import SelectStatementGrouping
+from ..sql.visitors import InternalTraversal
+from ..util import collections_abc
+
+__all__ = ["Query", "QueryContext", "aliased"]
+
+
+@inspection._self_inspects
+@log.class_logger
+class Query(
+ _SelectFromElements,
+ SupportsCloneAnnotations,
+ HasPrefixes,
+ HasSuffixes,
+ HasHints,
+ Executable,
+):
+
+ """ORM-level SQL construction object.
+
+ :class:`_query.Query`
+ is the source of all SELECT statements generated by the
+ ORM, both those formulated by end-user query operations as well as by
+ high level internal operations such as related collection loading. It
+ features a generative interface whereby successive calls return a new
+ :class:`_query.Query` object, a copy of the former with additional
+ criteria and options associated with it.
+
+ :class:`_query.Query` objects are normally initially generated using the
+ :meth:`~.Session.query` method of :class:`.Session`, and in
+ less common cases by instantiating the :class:`_query.Query` directly and
+ associating with a :class:`.Session` using the
+ :meth:`_query.Query.with_session`
+ method.
+
+ For a full walk through of :class:`_query.Query` usage, see the
+ :ref:`ormtutorial_toplevel`.
+
+ """
+
+ # elements that are in Core and can be cached in the same way
+ _where_criteria = ()
+ _having_criteria = ()
+
+ _order_by_clauses = ()
+ _group_by_clauses = ()
+ _limit_clause = None
+ _offset_clause = None
+
+ _distinct = False
+ _distinct_on = ()
+
+ _for_update_arg = None
+ _correlate = ()
+ _auto_correlate = True
+ _from_obj = ()
+ _setup_joins = ()
+ _legacy_setup_joins = ()
+ _label_style = LABEL_STYLE_LEGACY_ORM
+
+ _memoized_select_entities = ()
+
+ _compile_options = ORMCompileState.default_compile_options
+
+ load_options = QueryContext.default_load_options + {
+ "_legacy_uniquing": True
+ }
+
+ _params = util.EMPTY_DICT
+
+ # local Query builder state, not needed for
+ # compilation or execution
+ _aliased_generation = None
+ _enable_assertions = True
+ _last_joined_entity = None
+ _statement = None
+
+ # mirrors that of ClauseElement, used to propagate the "orm"
+ # plugin as well as the "subject" of the plugin, e.g. the mapper
+ # we are querying against.
+ _propagate_attrs = util.immutabledict()
+
+ def __init__(self, entities, session=None):
+ """Construct a :class:`_query.Query` directly.
+
+ E.g.::
+
+ q = Query([User, Address], session=some_session)
+
+ The above is equivalent to::
+
+ q = some_session.query(User, Address)
+
+ :param entities: a sequence of entities and/or SQL expressions.
+
+ :param session: a :class:`.Session` with which the
+ :class:`_query.Query`
+ will be associated. Optional; a :class:`_query.Query`
+ can be associated
+ with a :class:`.Session` generatively via the
+ :meth:`_query.Query.with_session` method as well.
+
+ .. seealso::
+
+ :meth:`.Session.query`
+
+ :meth:`_query.Query.with_session`
+
+ """
+
+ self.session = session
+ self._set_entities(entities)
+
+ def _set_propagate_attrs(self, values):
+ self._propagate_attrs = util.immutabledict(values)
+ return self
+
+ def _set_entities(self, entities):
+ self._raw_columns = [
+ coercions.expect(
+ roles.ColumnsClauseRole,
+ ent,
+ apply_propagate_attrs=self,
+ post_inspect=True,
+ )
+ for ent in util.to_list(entities)
+ ]
+
+ def _entity_from_pre_ent_zero(self):
+ if not self._raw_columns:
+ return None
+
+ ent = self._raw_columns[0]
+
+ if "parententity" in ent._annotations:
+ return ent._annotations["parententity"]
+ elif isinstance(ent, ORMColumnsClauseRole):
+ return ent.entity
+ elif "bundle" in ent._annotations:
+ return ent._annotations["bundle"]
+ else:
+ # label, other SQL expression
+ for element in visitors.iterate(ent):
+ if "parententity" in element._annotations:
+ return element._annotations["parententity"]
+ else:
+ return None
+
+ def _only_full_mapper_zero(self, methname):
+ if (
+ len(self._raw_columns) != 1
+ or "parententity" not in self._raw_columns[0]._annotations
+ or not self._raw_columns[0].is_selectable
+ ):
+ raise sa_exc.InvalidRequestError(
+ "%s() can only be used against "
+ "a single mapped class." % methname
+ )
+
+ return self._raw_columns[0]._annotations["parententity"]
+
+ def _set_select_from(self, obj, set_base_alias):
+ fa = [
+ coercions.expect(
+ roles.StrictFromClauseRole,
+ elem,
+ allow_select=True,
+ apply_propagate_attrs=self,
+ )
+ for elem in obj
+ ]
+
+ self._compile_options += {"_set_base_alias": set_base_alias}
+ self._from_obj = tuple(fa)
+
+ @_generative
+ def _set_lazyload_from(self, state):
+ self.load_options += {"_lazy_loaded_from": state}
+
+ def _get_condition(self):
+ return self._no_criterion_condition(
+ "get", order_by=False, distinct=False
+ )
+
+ def _get_existing_condition(self):
+ self._no_criterion_assertion("get", order_by=False, distinct=False)
+
+ def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
+ if not self._enable_assertions:
+ return
+ if (
+ self._where_criteria
+ or self._statement is not None
+ or self._from_obj
+ or self._legacy_setup_joins
+ or self._limit_clause is not None
+ or self._offset_clause is not None
+ or self._group_by_clauses
+ or (order_by and self._order_by_clauses)
+ or (distinct and self._distinct)
+ ):
+ raise sa_exc.InvalidRequestError(
+ "Query.%s() being called on a "
+ "Query with existing criterion. " % meth
+ )
+
+ def _no_criterion_condition(self, meth, order_by=True, distinct=True):
+ self._no_criterion_assertion(meth, order_by, distinct)
+
+ self._from_obj = self._legacy_setup_joins = ()
+ if self._statement is not None:
+ self._compile_options += {"_statement": None}
+ self._where_criteria = ()
+ self._distinct = False
+
+ self._order_by_clauses = self._group_by_clauses = ()
+
+ def _no_clauseelement_condition(self, meth):
+ if not self._enable_assertions:
+ return
+ if self._order_by_clauses:
+ raise sa_exc.InvalidRequestError(
+ "Query.%s() being called on a "
+ "Query with existing criterion. " % meth
+ )
+ self._no_criterion_condition(meth)
+
+ def _no_statement_condition(self, meth):
+ if not self._enable_assertions:
+ return
+ if self._statement is not None:
+ raise sa_exc.InvalidRequestError(
+ (
+ "Query.%s() being called on a Query with an existing full "
+ "statement - can't apply criterion."
+ )
+ % meth
+ )
+
+ def _no_limit_offset(self, meth):
+ if not self._enable_assertions:
+ return
+ if self._limit_clause is not None or self._offset_clause is not None:
+ raise sa_exc.InvalidRequestError(
+ "Query.%s() being called on a Query which already has LIMIT "
+ "or OFFSET applied. Call %s() before limit() or offset() "
+ "are applied." % (meth, meth)
+ )
+
+ @property
+ def _has_row_limiting_clause(self):
+ return (
+ self._limit_clause is not None or self._offset_clause is not None
+ )
+
+ def _get_options(
+ self,
+ populate_existing=None,
+ version_check=None,
+ only_load_props=None,
+ refresh_state=None,
+ identity_token=None,
+ ):
+ load_options = {}
+ compile_options = {}
+
+ if version_check:
+ load_options["_version_check"] = version_check
+ if populate_existing:
+ load_options["_populate_existing"] = populate_existing
+ if refresh_state:
+ load_options["_refresh_state"] = refresh_state
+ compile_options["_for_refresh_state"] = True
+ if only_load_props:
+ compile_options["_only_load_props"] = frozenset(only_load_props)
+ if identity_token:
+ load_options["_refresh_identity_token"] = identity_token
+
+ if load_options:
+ self.load_options += load_options
+ if compile_options:
+ self._compile_options += compile_options
+
+ return self
+
+ def _clone(self):
+ return self._generate()
+
+ @property
+ def statement(self):
+ """The full SELECT statement represented by this Query.
+
+        The statement by default will not have disambiguating labels
+        applied to the construct unless
+        set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) is called first.
+
+ """
+
+ # .statement can return the direct future.Select() construct here, as
+ # long as we are not using subsequent adaption features that
+ # are made against raw entities, e.g. from_self(), with_polymorphic(),
+ # select_entity_from(). If these features are being used, then
+ # the Select() we return will not have the correct .selected_columns
+ # collection and will not embed in subsequent queries correctly.
+ # We could find a way to make this collection "correct", however
+ # this would not be too different from doing the full compile as
+ # we are doing in any case, the Select() would still not have the
+ # proper state for other attributes like whereclause, order_by,
+ # and these features are all deprecated in any case.
+ #
+ # for these reasons, Query is not a Select, it remains an ORM
+ # object for which __clause_element__() must be called in order for
+ # it to provide a real expression object.
+ #
+ # from there, it starts to look much like Query itself won't be
+        # passed into the execute process and won't generate its own cache
+ # key; this will all occur in terms of the ORM-enabled Select.
+ if (
+ not self._compile_options._set_base_alias
+ and not self._compile_options._with_polymorphic_adapt_map
+ ):
+ # if we don't have legacy top level aliasing features in use
+ # then convert to a future select() directly
+ stmt = self._statement_20(for_statement=True)
+ else:
+ stmt = self._compile_state(for_statement=True).statement
+
+ if self._params:
+ stmt = stmt.params(self._params)
+
+ return stmt
+
+ def _final_statement(self, legacy_query_style=True):
+ """Return the 'final' SELECT statement for this :class:`.Query`.
+
+ This is the Core-only select() that will be rendered by a complete
+ compilation of this query, and is what .statement used to return
+ in 1.3.
+
+ This method creates a complete compile state so is fairly expensive.
+
+ """
+
+ q = self._clone()
+
+ return q._compile_state(
+ use_legacy_query_style=legacy_query_style
+ ).statement
+
+ def _statement_20(self, for_statement=False, use_legacy_query_style=True):
+ # TODO: this event needs to be deprecated, as it currently applies
+ # only to ORM query and occurs at this spot that is now more
+ # or less an artificial spot
+ if self.dispatch.before_compile:
+ for fn in self.dispatch.before_compile:
+ new_query = fn(self)
+ if new_query is not None and new_query is not self:
+ self = new_query
+ if not fn._bake_ok:
+ self._compile_options += {"_bake_ok": False}
+
+ compile_options = self._compile_options
+ compile_options += {
+ "_for_statement": for_statement,
+ "_use_legacy_query_style": use_legacy_query_style,
+ }
+
+ if self._statement is not None:
+ stmt = FromStatement(self._raw_columns, self._statement)
+ stmt.__dict__.update(
+ _with_options=self._with_options,
+ _with_context_options=self._with_context_options,
+ _compile_options=compile_options,
+ _execution_options=self._execution_options,
+ _propagate_attrs=self._propagate_attrs,
+ )
+ else:
+ # Query / select() internal attributes are 99% cross-compatible
+ stmt = Select._create_raw_select(**self.__dict__)
+ stmt.__dict__.update(
+ _label_style=self._label_style,
+ _compile_options=compile_options,
+ _propagate_attrs=self._propagate_attrs,
+ )
+ stmt.__dict__.pop("session", None)
+
+ # ensure the ORM context is used to compile the statement, even
+ # if it has no ORM entities. This is so ORM-only things like
+ # _legacy_joins are picked up that wouldn't be picked up by the
+ # Core statement context
+ if "compile_state_plugin" not in stmt._propagate_attrs:
+ stmt._propagate_attrs = stmt._propagate_attrs.union(
+ {"compile_state_plugin": "orm", "plugin_subject": None}
+ )
+
+ return stmt
+
+ def subquery(
+ self,
+ name=None,
+ with_labels=False,
+ reduce_columns=False,
+ ):
+ """Return the full SELECT statement represented by
+ this :class:`_query.Query`, embedded within an
+ :class:`_expression.Alias`.
+
+ Eager JOIN generation within the query is disabled.
+
+ :param name: string name to be assigned as the alias;
+ this is passed through to :meth:`_expression.FromClause.alias`.
+ If ``None``, a name will be deterministically generated
+ at compile time.
+
+ :param with_labels: if True, :meth:`.with_labels` will be called
+ on the :class:`_query.Query` first to apply table-qualified labels
+ to all columns.
+
+ :param reduce_columns: if True,
+ :meth:`_expression.Select.reduce_columns` will
+ be called on the resulting :func:`_expression.select` construct,
+ to remove same-named columns where one also refers to the other
+ via foreign key or WHERE clause equivalence.
+
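+        E.g., given an illustrative mapped ``User`` class::
+
+            subq = session.query(User.id, User.name).subquery()
+
+            stmt = session.query(subq.c.name)
+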
+ """
+ q = self.enable_eagerloads(False)
+ if with_labels:
+ q = q.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
+
+ q = q.statement
+
+ if reduce_columns:
+ q = q.reduce_columns()
+ return q.alias(name=name)
+
+ def cte(self, name=None, recursive=False, nesting=False):
+ r"""Return the full SELECT statement represented by this
+ :class:`_query.Query` represented as a common table expression (CTE).
+
+ Parameters and usage are the same as those of the
+ :meth:`_expression.SelectBase.cte` method; see that method for
+ further details.
+
+ Here is the `PostgreSQL WITH
+ RECURSIVE example
+ <https://www.postgresql.org/docs/current/static/queries-with.html>`_.
+ Note that, in this example, the ``included_parts`` cte and the
+ ``incl_alias`` alias of it are Core selectables, which
+ means the columns are accessed via the ``.c.`` attribute. The
+ ``parts_alias`` object is an :func:`_orm.aliased` instance of the
+ ``Part`` entity, so column-mapped attributes are available
+ directly::
+
+ from sqlalchemy.orm import aliased
+
+ class Part(Base):
+ __tablename__ = 'part'
+ part = Column(String, primary_key=True)
+ sub_part = Column(String, primary_key=True)
+ quantity = Column(Integer)
+
+ included_parts = session.query(
+ Part.sub_part,
+ Part.part,
+ Part.quantity).\
+ filter(Part.part=="our part").\
+ cte(name="included_parts", recursive=True)
+
+ incl_alias = aliased(included_parts, name="pr")
+ parts_alias = aliased(Part, name="p")
+ included_parts = included_parts.union_all(
+ session.query(
+ parts_alias.sub_part,
+ parts_alias.part,
+ parts_alias.quantity).\
+ filter(parts_alias.part==incl_alias.c.sub_part)
+ )
+
+ q = session.query(
+ included_parts.c.sub_part,
+ func.sum(included_parts.c.quantity).
+ label('total_quantity')
+ ).\
+ group_by(included_parts.c.sub_part)
+
+ .. seealso::
+
+ :meth:`_expression.HasCTE.cte`
+
+ """
+ return self.enable_eagerloads(False).statement.cte(
+ name=name, recursive=recursive, nesting=nesting
+ )
+
+ def label(self, name):
+ """Return the full SELECT statement represented by this
+ :class:`_query.Query`, converted
+ to a scalar subquery with a label of the given name.
+
+ Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
+
+ """
+
+ return self.enable_eagerloads(False).statement.label(name)
+
+ @util.deprecated(
+ "1.4",
+ "The :meth:`_query.Query.as_scalar` method is deprecated and will be "
+ "removed in a future release. Please refer to "
+ ":meth:`_query.Query.scalar_subquery`.",
+ )
+ def as_scalar(self):
+ """Return the full SELECT statement represented by this
+ :class:`_query.Query`, converted to a scalar subquery.
+
+ """
+ return self.scalar_subquery()
+
+ def scalar_subquery(self):
+ """Return the full SELECT statement represented by this
+ :class:`_query.Query`, converted to a scalar subquery.
+
+ Analogous to
+ :meth:`sqlalchemy.sql.expression.SelectBase.scalar_subquery`.
+
+ .. versionchanged:: 1.4 The :meth:`_query.Query.scalar_subquery`
+ method replaces the :meth:`_query.Query.as_scalar` method.
+
+ """
+
+ return self.enable_eagerloads(False).statement.scalar_subquery()
+
+ @property
+ def selectable(self):
+ """Return the :class:`_expression.Select` object emitted by this
+ :class:`_query.Query`.
+
+ Used for :func:`_sa.inspect` compatibility, this is equivalent to::
+
+ query.enable_eagerloads(False).with_labels().statement
+
+ """
+ return self.__clause_element__()
+
+ def __clause_element__(self):
+ return (
+ self._with_compile_options(
+ _enable_eagerloads=False, _render_for_subquery=True
+ )
+ .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
+ .statement
+ )
+
+ @_generative
+ def only_return_tuples(self, value):
+ """When set to True, the query results will always be a tuple.
+
+        This is specifically for single-element queries. The default is False.
+
+ .. versionadded:: 1.2.5
+
+ .. seealso::
+
+ :meth:`_query.Query.is_single_entity`
+
+ """
+ self.load_options += dict(_only_return_tuples=value)
+
+ @property
+ def is_single_entity(self):
+ """Indicates if this :class:`_query.Query`
+ returns tuples or single entities.
+
+ Returns True if this query returns a single entity for each instance
+ in its result list, and False if this query returns a tuple of entities
+ for each result.
+
+ .. versionadded:: 1.3.11
+
+ .. seealso::
+
+ :meth:`_query.Query.only_return_tuples`
+
+ """
+ return (
+ not self.load_options._only_return_tuples
+ and len(self._raw_columns) == 1
+ and "parententity" in self._raw_columns[0]._annotations
+ and isinstance(
+ self._raw_columns[0]._annotations["parententity"],
+ ORMColumnsClauseRole,
+ )
+ )
+
+ @_generative
+ def enable_eagerloads(self, value):
+ """Control whether or not eager joins and subqueries are
+ rendered.
+
+ When set to False, the returned Query will not render
+ eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
+ :func:`~sqlalchemy.orm.subqueryload` options
+ or mapper-level ``lazy='joined'``/``lazy='subquery'``
+ configurations.
+
+ This is used primarily when nesting the Query's
+ statement into a subquery or other
+ selectable, or when using :meth:`_query.Query.yield_per`.
+
+ """
+ self._compile_options += {"_enable_eagerloads": value}
+
+ @_generative
+ def _with_compile_options(self, **opt):
+ self._compile_options += opt
+
+ @util.deprecated_20(
+ ":meth:`_orm.Query.with_labels` and :meth:`_orm.Query.apply_labels`",
+ alternative="Use set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) "
+ "instead.",
+ )
+ def with_labels(self):
+ return self.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
+
+ apply_labels = with_labels
+
+ @property
+ def get_label_style(self):
+ """
+ Retrieve the current label style.
+
+ .. versionadded:: 1.4
+
+ """
+ return self._label_style
+
+ def set_label_style(self, style):
+ """Apply column labels to the return value of Query.statement.
+
+ Indicates that this Query's `statement` accessor should return
+ a SELECT statement that applies labels to all columns in the
+ form <tablename>_<columnname>; this is commonly used to
+ disambiguate columns from multiple tables which have the same
+ name.
+
+ When the `Query` actually issues SQL to load rows, it always
+ uses column labeling.
+
+        .. note:: The :meth:`_query.Query.set_label_style` method *only*
+            applies to the output of :attr:`_query.Query.statement`, and
+            *not* to any of
+ the result-row invoking systems of :class:`_query.Query` itself,
+ e.g.
+ :meth:`_query.Query.first`, :meth:`_query.Query.all`, etc.
+ To execute
+ a query using :meth:`_query.Query.set_label_style`, invoke the
+ :attr:`_query.Query.statement` using :meth:`.Session.execute`::
+
+ result = session.execute(
+ query
+ .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
+ .statement
+ )
+
+ .. versionadded:: 1.4
+
+ """ # noqa
+ if self._label_style is not style:
+ self = self._generate()
+ self._label_style = style
+ return self
+
+ @_generative
+ def enable_assertions(self, value):
+ """Control whether assertions are generated.
+
+ When set to False, the returned Query will
+ not assert its state before certain operations,
+ including that LIMIT/OFFSET has not been applied
+ when filter() is called, no criterion exists
+ when get() is called, and no "from_statement()"
+ exists when filter()/order_by()/group_by() etc.
+ is called. This more permissive mode is used by
+ custom Query subclasses to specify criterion or
+ other modifiers outside of the usual usage patterns.
+
+ Care should be taken to ensure that the usage
+ pattern is even possible. A statement applied
+ by from_statement() will override any criterion
+ set by filter() or order_by(), for example.
+
+ """
+ self._enable_assertions = value
+
+ @property
+ def whereclause(self):
+ """A readonly attribute which returns the current WHERE criterion for
+ this Query.
+
+ This returned value is a SQL expression construct, or ``None`` if no
+ criterion has been established.
+
+ """
+ return sql.elements.BooleanClauseList._construct_for_whereclause(
+ self._where_criteria
+ )
+
+ @_generative
+ def _with_current_path(self, path):
+ """indicate that this query applies to objects loaded
+ within a certain path.
+
+ Used by deferred loaders (see strategies.py) which transfer
+ query options from an originating query to a newly generated
+ query intended for the deferred load.
+
+ """
+ self._compile_options += {"_current_path": path}
+
+ @_generative
+ @_assertions(_no_clauseelement_condition)
+ @util.deprecated_20(
+ ":meth:`_orm.Query.with_polymorphic`",
+ alternative="Use the orm.with_polymorphic() standalone function",
+ )
+ def with_polymorphic(
+ self, cls_or_mappers, selectable=None, polymorphic_on=None
+ ):
+ """Load columns for inheriting classes.
+
+ This is a legacy method which is replaced by the
+ :func:`_orm.with_polymorphic` function.
+
+ .. warning:: The :meth:`_orm.Query.with_polymorphic` method does
+ **not** support 1.4/2.0 style features including
+ :func:`_orm.with_loader_criteria`. Please migrate code
+ to use :func:`_orm.with_polymorphic`.
+
+ :meth:`_query.Query.with_polymorphic` applies transformations
+ to the "main" mapped class represented by this :class:`_query.Query`.
+ The "main" mapped class here means the :class:`_query.Query`
+ object's first argument is a full class, i.e.
+ ``session.query(SomeClass)``. These transformations allow additional
+ tables to be present in the FROM clause so that columns for a
+ joined-inheritance subclass are available in the query, both for the
+ purposes of load-time efficiency as well as the ability to use
+ these columns at query time.
+
+ .. seealso::
+
+ :ref:`with_polymorphic` - illustrates current patterns
+
+ """
+
+ entity = _legacy_filter_by_entity_zero(self)
+
+ wp = with_polymorphic(
+ entity,
+ cls_or_mappers,
+ selectable=selectable,
+ polymorphic_on=polymorphic_on,
+ )
+
+ self._compile_options = self._compile_options.add_to_element(
+ "_with_polymorphic_adapt_map", ((entity, inspect(wp)),)
+ )
+
+ @_generative
+ def yield_per(self, count):
+ r"""Yield only ``count`` rows at a time.
+
+        The purpose of this method is, when fetching very large result sets
+        (> 10K rows), to batch results in sub-collections and yield them
+        out partially, so that the Python interpreter doesn't need to declare
+        very large areas of memory at once, which is both time consuming and
+        leads to excessive memory use. The performance of fetching hundreds
+        of thousands of rows can often double when a suitable yield-per
+        setting (e.g. approximately 1000) is used, even with DBAPIs that
+        buffer rows (which most do).
+
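+        E.g., streaming a large result in batches of 1000, where ``User``
+        is an arbitrary mapped class::
+
+            for user in session.query(User).yield_per(1000):
+                print(user.name)
+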
+ As of SQLAlchemy 1.4, the :meth:`_orm.Query.yield_per` method is
+ equivalent to using the ``yield_per`` execution option at the ORM
+ level. See the section :ref:`orm_queryguide_yield_per` for further
+ background on this option.
+
+ .. seealso::
+
+ :ref:`orm_queryguide_yield_per`
+
+ """
+ self.load_options += {"_yield_per": count}
+
+ @util.deprecated_20(
+ ":meth:`_orm.Query.get`",
+ alternative="The method is now available as :meth:`_orm.Session.get`",
+ becomes_legacy=True,
+ )
+ def get(self, ident):
+ """Return an instance based on the given primary key identifier,
+ or ``None`` if not found.
+
+ E.g.::
+
+ my_user = session.query(User).get(5)
+
+ some_object = session.query(VersionedFoo).get((5, 10))
+
+ some_object = session.query(VersionedFoo).get(
+ {"id": 5, "version_id": 10})
+
+ :meth:`_query.Query.get` is special in that it provides direct
+ access to the identity map of the owning :class:`.Session`.
+ If the given primary key identifier is present
+ in the local identity map, the object is returned
+ directly from this collection and no SQL is emitted,
+ unless the object has been marked fully expired.
+ If not present,
+ a SELECT is performed in order to locate the object.
+
+ :meth:`_query.Query.get` also will perform a check if
+ the object is present in the identity map and
+ marked as expired - a SELECT
+ is emitted to refresh the object as well as to
+ ensure that the row is still present.
+ If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
+
+ :meth:`_query.Query.get` is only used to return a single
+ mapped instance, not multiple instances or
+ individual column constructs, and strictly
+ on a single primary key value. The originating
+ :class:`_query.Query` must be constructed in this way,
+ i.e. against a single mapped entity,
+ with no additional filtering criterion. Loading
+ options via :meth:`_query.Query.options` may be applied
+ however, and will be used if the object is not
+ yet locally present.
+
+ :param ident: A scalar, tuple, or dictionary representing the
+ primary key. For a composite (e.g. multiple column) primary key,
+ a tuple or dictionary should be passed.
+
+ For a single-column primary key, the scalar calling form is typically
+ the most expedient. If the primary key of a row is the value "5",
+ the call looks like::
+
+ my_object = query.get(5)
+
+ The tuple form contains primary key values typically in
+ the order in which they correspond to the mapped
+ :class:`_schema.Table`
+ object's primary key columns, or if the
+ :paramref:`_orm.Mapper.primary_key` configuration parameter were
+ used, in
+ the order used for that parameter. For example, if the primary key
+ of a row is represented by the integer
+ digits "5, 10" the call would look like::
+
+ my_object = query.get((5, 10))
+
+ The dictionary form should include as keys the mapped attribute names
+ corresponding to each element of the primary key. If the mapped class
+ has the attributes ``id``, ``version_id`` as the attributes which
+ store the object's primary key value, the call would look like::
+
+ my_object = query.get({"id": 5, "version_id": 10})
+
+ .. versionadded:: 1.3 the :meth:`_query.Query.get`
+ method now optionally
+ accepts a dictionary of attribute names to values in order to
+ indicate a primary key identifier.
+
+
+ :return: The object instance, or ``None``.
+
+ """
+ self._no_criterion_assertion("get", order_by=False, distinct=False)
+
+ # we still implement _get_impl() so that baked query can override
+ # it
+ return self._get_impl(ident, loading.load_on_pk_identity)
+
+ def _get_impl(self, primary_key_identity, db_load_fn, identity_token=None):
+ mapper = self._only_full_mapper_zero("get")
+ return self.session._get_impl(
+ mapper,
+ primary_key_identity,
+ db_load_fn,
+ populate_existing=self.load_options._populate_existing,
+ with_for_update=self._for_update_arg,
+ options=self._with_options,
+ identity_token=identity_token,
+ execution_options=self._execution_options,
+ )
+
+ @property
+ def lazy_loaded_from(self):
+ """An :class:`.InstanceState` that is using this :class:`_query.Query`
+ for a lazy load operation.
+
+ .. deprecated:: 1.4 This attribute should be viewed via the
+ :attr:`.ORMExecuteState.lazy_loaded_from` attribute, within
+ the context of the :meth:`.SessionEvents.do_orm_execute`
+ event.
+
+ .. seealso::
+
+ :attr:`.ORMExecuteState.lazy_loaded_from`
+
+ """
+ return self.load_options._lazy_loaded_from
+
+ @property
+ def _current_path(self):
+ return self._compile_options._current_path
+
+ @_generative
+ def correlate(self, *fromclauses):
+ """Return a :class:`.Query` construct which will correlate the given
+ FROM clauses to that of an enclosing :class:`.Query` or
+ :func:`~.expression.select`.
+
+ The method here accepts mapped classes, :func:`.aliased` constructs,
+ and :func:`.mapper` constructs as arguments, which are resolved into
+ expression constructs, in addition to appropriate expression
+ constructs.
+
+ The correlation arguments are ultimately passed to
+ :meth:`_expression.Select.correlate`
+ after coercion to expression constructs.
+
+ The correlation arguments take effect in such cases
+ as when :meth:`_query.Query.from_self` is used, or when
+ a subquery as returned by :meth:`_query.Query.subquery` is
+ embedded in another :func:`_expression.select` construct.
+
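+ For example, a minimal sketch assuming mapped ``User`` and
+ ``Address`` classes, where an EXISTS subquery is explicitly
+ correlated to the enclosing query::
+
+ subq = session.query(Address).\
+ filter(Address.user_id == User.id).\
+ correlate(User).exists()
+
+ q = session.query(User).filter(subq)
+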
+ """
+
+ self._auto_correlate = False
+ if fromclauses and fromclauses[0] in {None, False}:
+ self._correlate = ()
+ else:
+ self._correlate = set(self._correlate).union(
+ coercions.expect(roles.FromClauseRole, f) for f in fromclauses
+ )
+
+ @_generative
+ def autoflush(self, setting):
+ """Return a Query with a specific 'autoflush' setting.
+
+ As of SQLAlchemy 1.4, the :meth:`_orm.Query.autoflush` method
+ is equivalent to using the ``autoflush`` execution option at the
+ ORM level. See the section :ref:`orm_queryguide_autoflush` for
+ further background on this option.
+
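+ E.g., a minimal sketch assuming a mapped ``User`` class, disabling
+ autoflush for this query alone::
+
+ q = session.query(User).autoflush(False)
+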
+ """
+ self.load_options += {"_autoflush": setting}
+
+ @_generative
+ def populate_existing(self):
+ """Return a :class:`_query.Query`
+ that will expire and refresh all instances
+ as they are loaded, or reused from the current :class:`.Session`.
+
+ As of SQLAlchemy 1.4, the :meth:`_orm.Query.populate_existing` method
+ is equivalent to using the ``populate_existing`` execution option at
+ the ORM level. See the section :ref:`orm_queryguide_populate_existing`
+ for further background on this option.
+
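+ E.g., a minimal sketch assuming a mapped ``User`` class::
+
+ users = session.query(User).populate_existing().all()
+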
+ """
+ self.load_options += {"_populate_existing": True}
+
+ @_generative
+ def _with_invoke_all_eagers(self, value):
+ """Set the 'invoke all eagers' flag which causes joined- and
+ subquery loaders to traverse into already-loaded related objects
+ and collections.
+
+ Default is that of :attr:`_query.Query._invoke_all_eagers`.
+
+ """
+ self.load_options += {"_invoke_all_eagers": value}
+
+ @util.deprecated_20(
+ ":meth:`_orm.Query.with_parent`",
+ alternative="Use the :func:`_orm.with_parent` standalone construct.",
+ becomes_legacy=True,
+ )
+ @util.preload_module("sqlalchemy.orm.relationships")
+ def with_parent(self, instance, property=None, from_entity=None): # noqa
+ """Add filtering criterion that relates the given instance
+ to a child object or collection, using its attribute state
+ as well as an established :func:`_orm.relationship()`
+ configuration.
+
+ The method uses the :func:`.with_parent` function to generate
+ the clause, the result of which is passed to
+ :meth:`_query.Query.filter`.
+
+ Parameters are the same as :func:`.with_parent`, with the exception
+ that the given property can be None, in which case a search is
+ performed against this :class:`_query.Query` object's target mapper.
+
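+ E.g., a minimal sketch assuming mapped ``User`` and ``Address``
+ classes linked by a ``User.addresses`` relationship::
+
+ someuser = session.query(User).first()
+
+ addresses = session.query(Address).\
+ with_parent(someuser, User.addresses).all()
+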
+ :param instance:
+ An instance which has some :func:`_orm.relationship`.
+
+ :param property:
+ String property name, or class-bound attribute, which indicates
+ what relationship from the instance should be used to reconcile the
+ parent/child relationship.
+
+ :param from_entity:
+ Entity to be considered as the left side. This defaults to the
+ "zero" entity of the :class:`_query.Query` itself.
+
+ """
+ relationships = util.preloaded.orm_relationships
+
+ if from_entity:
+ entity_zero = inspect(from_entity)
+ else:
+ entity_zero = _legacy_filter_by_entity_zero(self)
+ if property is None:
+ # TODO: deprecate, property has to be supplied
+ mapper = object_mapper(instance)
+
+ for prop in mapper.iterate_properties:
+ if (
+ isinstance(prop, relationships.RelationshipProperty)
+ and prop.mapper is entity_zero.mapper
+ ):
+ property = prop # noqa
+ break
+ else:
+ raise sa_exc.InvalidRequestError(
+ "Could not locate a property which relates instances "
+ "of class '%s' to instances of class '%s'"
+ % (
+ entity_zero.mapper.class_.__name__,
+ instance.__class__.__name__,
+ )
+ )
+
+ return self.filter(with_parent(instance, property, entity_zero.entity))
+
+ @_generative
+ def add_entity(self, entity, alias=None):
+ """add a mapped entity to the list of result columns
+ to be returned."""
+
+ if alias is not None:
+ # TODO: deprecate
+ entity = aliased(entity, alias)
+
+ self._raw_columns = list(self._raw_columns)
+
+ self._raw_columns.append(
+ coercions.expect(
+ roles.ColumnsClauseRole, entity, apply_propagate_attrs=self
+ )
+ )
+
+ @_generative
+ def with_session(self, session):
+ """Return a :class:`_query.Query` that will use the given
+ :class:`.Session`.
+
+ While the :class:`_query.Query`
+ object is normally instantiated using the
+ :meth:`.Session.query` method, it is legal to build the
+ :class:`_query.Query`
+ directly without necessarily using a :class:`.Session`. Such a
+ :class:`_query.Query` object, or any :class:`_query.Query`
+ already associated
+ with a different :class:`.Session`, can produce a new
+ :class:`_query.Query`
+ object associated with a target session using this method::
+
+ from sqlalchemy.orm import Query
+
+ query = Query([MyClass]).filter(MyClass.id == 5)
+
+ result = query.with_session(my_session).one()
+
+ """
+
+ self.session = session
+
+ @util.deprecated_20(
+ ":meth:`_query.Query.from_self`",
+ alternative="The new approach is to use the :func:`.orm.aliased` "
+ "construct in conjunction with a subquery. See the section "
+ ":ref:`Selecting from the query itself as a subquery "
+ "<migration_20_query_from_self>` in the 2.0 migration notes for an "
+ "example.",
+ )
+ def from_self(self, *entities):
+ r"""return a Query that selects from this Query's
+ SELECT statement.
+
+ :meth:`_query.Query.from_self` essentially turns the SELECT statement
+ into a SELECT of itself. Given a query such as::
+
+ q = session.query(User).filter(User.name.like('e%'))
+
+ Given the :meth:`_query.Query.from_self` version::
+
+ q = session.query(User).filter(User.name.like('e%')).from_self()
+
+ This query renders as:
+
+ .. sourcecode:: sql
+
+ SELECT anon_1.user_id AS anon_1_user_id,
+ anon_1.user_name AS anon_1_user_name
+ FROM (SELECT "user".id AS user_id, "user".name AS user_name
+ FROM "user"
+ WHERE "user".name LIKE :name_1) AS anon_1
+
+ There are lots of cases where :meth:`_query.Query.from_self`
+ may be useful.
+ A simple one is the case above, where we may want to apply a row LIMIT to
+ the set of user objects we query against, and then apply additional
+ joins against that row-limited set::
+
+ q = session.query(User).filter(User.name.like('e%')).\
+ limit(5).from_self().\
+ join(User.addresses).filter(Address.email.like('q%'))
+
+ The above query joins to the ``Address`` entity but only against the
+ first five results of the ``User`` query:
+
+ .. sourcecode:: sql
+
+ SELECT anon_1.user_id AS anon_1_user_id,
+ anon_1.user_name AS anon_1_user_name
+ FROM (SELECT "user".id AS user_id, "user".name AS user_name
+ FROM "user"
+ WHERE "user".name LIKE :name_1
+ LIMIT :param_1) AS anon_1
+ JOIN address ON anon_1.user_id = address.user_id
+ WHERE address.email LIKE :email_1
+
+ **Automatic Aliasing**
+
+ Another key behavior of :meth:`_query.Query.from_self`
+ is that it applies
+ **automatic aliasing** to the entities inside the subquery, when
+ they are referenced on the outside. Above, if we continue to
+ refer to the ``User`` entity without any additional aliasing applied
+ to it, those references will be in terms of the subquery::
+
+ q = session.query(User).filter(User.name.like('e%')).\
+ limit(5).from_self().\
+ join(User.addresses).filter(Address.email.like('q%')).\
+ order_by(User.name)
+
+ The ORDER BY against ``User.name`` is aliased to be in terms of the
+ inner subquery:
+
+ .. sourcecode:: sql
+
+ SELECT anon_1.user_id AS anon_1_user_id,
+ anon_1.user_name AS anon_1_user_name
+ FROM (SELECT "user".id AS user_id, "user".name AS user_name
+ FROM "user"
+ WHERE "user".name LIKE :name_1
+ LIMIT :param_1) AS anon_1
+ JOIN address ON anon_1.user_id = address.user_id
+ WHERE address.email LIKE :email_1 ORDER BY anon_1.user_name
+
+ The automatic aliasing feature only works in a **limited** way,
+ for simple filters and orderings. More ambitious constructions,
+ such as referring to the entity in joins, should instead use
+ explicit subquery objects, typically produced via the
+ :meth:`_query.Query.subquery` method.
+ Always test the structure of queries by viewing the SQL to ensure
+ a particular structure does what's expected!
+
+ **Changing the Entities**
+
+ :meth:`_query.Query.from_self`
+ also includes the ability to modify what
+ columns are being queried. In our example, we want ``User.id``
+ to be queried by the inner query, so that we can join to the
+ ``Address`` entity on the outside, but we only want the outer
+ query to return the ``Address.email`` column::
+
+ q = session.query(User).filter(User.name.like('e%')).\
+ limit(5).from_self(Address.email).\
+ join(User.addresses).filter(Address.email.like('q%'))
+
+ yielding:
+
+ .. sourcecode:: sql
+
+ SELECT address.email AS address_email
+ FROM (SELECT "user".id AS user_id, "user".name AS user_name
+ FROM "user"
+ WHERE "user".name LIKE :name_1
+ LIMIT :param_1) AS anon_1
+ JOIN address ON anon_1.user_id = address.user_id
+ WHERE address.email LIKE :email_1
+
+ **Looking out for Inner / Outer Columns**
+
+ Keep in mind that when referring to columns that originate from
+ inside the subquery, we need to ensure they are present in the
+ columns clause of the subquery itself; this is an ordinary aspect of
+ SQL. For example, if we wanted to load from a joined entity inside
+ the subquery using :func:`.contains_eager`, we need to add those
+ columns. Below illustrates a join of ``Address`` to ``User``,
+ then a subquery, and then we'd like :func:`.contains_eager` to access
+ the ``User`` columns::
+
+ q = session.query(Address).join(Address.user).\
+ filter(User.name.like('e%'))
+
+ q = q.add_entity(User).from_self().\
+ options(contains_eager(Address.user))
+
+ We use :meth:`_query.Query.add_entity` above **before** we call
+ :meth:`_query.Query.from_self`
+ so that the ``User`` columns are present
+ in the inner subquery, so that they are available to the
+ :func:`.contains_eager` modifier we are using on the outside,
+ producing:
+
+ .. sourcecode:: sql
+
+ SELECT anon_1.address_id AS anon_1_address_id,
+ anon_1.address_email AS anon_1_address_email,
+ anon_1.address_user_id AS anon_1_address_user_id,
+ anon_1.user_id AS anon_1_user_id,
+ anon_1.user_name AS anon_1_user_name
+ FROM (
+ SELECT address.id AS address_id,
+ address.email AS address_email,
+ address.user_id AS address_user_id,
+ "user".id AS user_id,
+ "user".name AS user_name
+ FROM address JOIN "user" ON "user".id = address.user_id
+ WHERE "user".name LIKE :name_1) AS anon_1
+
+ If we didn't call ``add_entity(User)``, but still asked
+ :func:`.contains_eager` to load the ``User`` entity, it would be
+ forced to add the table on the outside without the correct
+ join criteria - note the ``anon_1, "user"`` phrase at
+ the end:
+
+ .. sourcecode:: sql
+
+ -- incorrect query
+ SELECT anon_1.address_id AS anon_1_address_id,
+ anon_1.address_email AS anon_1_address_email,
+ anon_1.address_user_id AS anon_1_address_user_id,
+ "user".id AS user_id,
+ "user".name AS user_name
+ FROM (
+ SELECT address.id AS address_id,
+ address.email AS address_email,
+ address.user_id AS address_user_id
+ FROM address JOIN "user" ON "user".id = address.user_id
+ WHERE "user".name LIKE :name_1) AS anon_1, "user"
+
+ :param \*entities: optional list of entities which will replace
+ those being selected.
+
+ """
+ return self._from_self(*entities)
+
+ def _from_self(self, *entities):
+ fromclause = (
+ self.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
+ .correlate(None)
+ .subquery()
+ ._anonymous_fromclause()
+ )
+
+ q = self._from_selectable(fromclause)
+
+ if entities:
+ q._set_entities(entities)
+ return q
+
+ @_generative
+ def _set_enable_single_crit(self, val):
+ self._compile_options += {"_enable_single_crit": val}
+
+ @_generative
+ def _from_selectable(self, fromclause, set_entity_from=True):
+ for attr in (
+ "_where_criteria",
+ "_order_by_clauses",
+ "_group_by_clauses",
+ "_limit_clause",
+ "_offset_clause",
+ "_last_joined_entity",
+ "_legacy_setup_joins",
+ "_memoized_select_entities",
+ "_distinct",
+ "_distinct_on",
+ "_having_criteria",
+ "_prefixes",
+ "_suffixes",
+ ):
+ self.__dict__.pop(attr, None)
+ self._set_select_from([fromclause], set_entity_from)
+ self._compile_options += {
+ "_enable_single_crit": False,
+ }
+
+ # this enables clause adaptation for non-ORM
+ # expressions.
+ # legacy. see test/orm/test_froms.py for various
+ # "oldstyle" tests that rely on this and the corresponding
+ # "newtyle" that do not.
+ self._compile_options += {"_orm_only_from_obj_alias": False}
+
+ @util.deprecated(
+ "1.4",
+ ":meth:`_query.Query.values` "
+ "is deprecated and will be removed in a "
+ "future release. Please use :meth:`_query.Query.with_entities`",
+ )
+ def values(self, *columns):
+ """Return an iterator yielding result tuples corresponding
+ to the given list of columns.
+
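+ E.g., a minimal sketch assuming a mapped ``User`` class with
+ ``id`` and ``name`` attributes::
+
+ q = session.query(User)
+
+ for user_id, user_name in q.values(User.id, User.name):
+ print(user_name)
+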
+ """
+
+ if not columns:
+ return iter(())
+ q = self._clone().enable_eagerloads(False)
+ q._set_entities(columns)
+ if not q.load_options._yield_per:
+ q.load_options += {"_yield_per": 10}
+ return iter(q)
+
+ _values = values
+
+ @util.deprecated(
+ "1.4",
+ ":meth:`_query.Query.value` "
+ "is deprecated and will be removed in a "
+ "future release. Please use :meth:`_query.Query.with_entities` "
+ "in combination with :meth:`_query.Query.scalar`",
+ )
+ def value(self, column):
+ """Return a scalar result corresponding to the given
+ column expression.
+
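+ E.g., a minimal sketch assuming a mapped ``User`` class::
+
+ user_name = session.query(User).\
+ filter(User.id == 5).value(User.name)
+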
+ """
+ try:
+ return next(self.values(column))[0]
+ except StopIteration:
+ return None
+
+ @_generative
+ def with_entities(self, *entities):
+ r"""Return a new :class:`_query.Query`
+ replacing the SELECT list with the
+ given entities.
+
+ e.g.::
+
+ # Users, filtered on some arbitrary criterion
+ # and then ordered by related email address
+ q = session.query(User).\
+ join(User.address).\
+ filter(User.name.like('%ed%')).\
+ order_by(Address.email)
+
+ # given *only* User.id==5, Address.email, and 'q', what
+ # would the *next* User in the result be ?
+ subq = q.with_entities(Address.email).\
+ order_by(None).\
+ filter(User.id==5).\
+ subquery()
+ q = q.join((subq, subq.c.email < Address.email)).\
+ limit(1)
+
+ """
+ _MemoizedSelectEntities._generate_for_statement(self)
+ self._set_entities(entities)
+
+ @_generative
+ def add_columns(self, *column):
+ """Add one or more column expressions to the list
+ of result columns to be returned."""
+
+ self._raw_columns = list(self._raw_columns)
+
+ self._raw_columns.extend(
+ coercions.expect(
+ roles.ColumnsClauseRole,
+ c,
+ apply_propagate_attrs=self,
+ post_inspect=True,
+ )
+ for c in column
+ )
+
+ @util.deprecated(
+ "1.4",
+ ":meth:`_query.Query.add_column` "
+ "is deprecated and will be removed in a "
+ "future release. Please use :meth:`_query.Query.add_columns`",
+ )
+ def add_column(self, column):
+ """Add a column expression to the list of result columns to be
+ returned.
+
+ """
+ return self.add_columns(column)
+
+ @_generative
+ def options(self, *args):
+ """Return a new :class:`_query.Query` object,
+ applying the given list of
+ mapper options.
+
+ Most supplied options regard changing how column- and
+ relationship-mapped attributes are loaded.
+
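+ E.g., a minimal sketch assuming a mapped ``User`` class with an
+ ``addresses`` relationship, using the :func:`_orm.joinedload`
+ loader option::
+
+ from sqlalchemy.orm import joinedload
+
+ q = session.query(User).options(joinedload(User.addresses))
+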
+ .. seealso::
+
+ :ref:`deferred_options`
+
+ :ref:`relationship_loader_options`
+
+ """
+
+ opts = tuple(util.flatten_iterator(args))
+ if self._compile_options._current_path:
+ for opt in opts:
+ if opt._is_legacy_option:
+ opt.process_query_conditionally(self)
+ else:
+ for opt in opts:
+ if opt._is_legacy_option:
+ opt.process_query(self)
+
+ self._with_options += opts
+
+ def with_transformation(self, fn):
+ """Return a new :class:`_query.Query` object transformed by
+ the given function.
+
+ E.g.::
+
+ def filter_something(criterion):
+ def transform(q):
+ return q.filter(criterion)
+ return transform
+
+ q = q.with_transformation(filter_something(x==5))
+
+ This allows ad-hoc recipes to be created for :class:`_query.Query`
+ objects. See the example at :ref:`hybrid_transformers`.
+
+ """
+ return fn(self)
+
+ def get_execution_options(self):
+ """Get the non-SQL options which will take effect during execution.
+
+ .. versionadded:: 1.3
+
+ .. seealso::
+
+ :meth:`_query.Query.execution_options`
+ """
+ return self._execution_options
+
+ @_generative
+ def execution_options(self, **kwargs):
+ """Set non-SQL options which take effect during execution.
+
+ Options allowed here include all of those accepted by
+ :meth:`_engine.Connection.execution_options`, as well as a series
+ of ORM specific options:
+
+ ``populate_existing=True`` - equivalent to using
+ :meth:`_orm.Query.populate_existing`
+
+ ``autoflush=True|False`` - equivalent to using
+ :meth:`_orm.Query.autoflush`
+
+ ``yield_per=<value>`` - equivalent to using
+ :meth:`_orm.Query.yield_per`
+
+ Note that the ``stream_results`` execution option is enabled
+ automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
+ method or execution option is used.
+
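+ E.g., a minimal sketch assuming a mapped ``User`` class::
+
+ q = session.query(User).execution_options(populate_existing=True)
+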
+ .. versionadded:: 1.4 - added ORM options to
+ :meth:`_orm.Query.execution_options`
+
+ The execution options may also be specified on a per execution basis
+ when using :term:`2.0 style` queries via the
+ :paramref:`_orm.Session.execution_options` parameter.
+
+ .. warning:: The
+ :paramref:`_engine.Connection.execution_options.schema_translate_map`
+ execution option should not be used at the level of individual ORM
+ statement executions, as the :class:`_orm.Session` will not track
+ objects from different schema translate maps within a single
+ session. For multiple schema translate maps within the scope of a
+ single :class:`_orm.Session`, see :ref:`examples_sharding`.
+
+
+ .. seealso::
+
+ :ref:`engine_stream_results`
+
+ :meth:`_query.Query.get_execution_options`
+
+ """
+ self._execution_options = self._execution_options.union(kwargs)
+
+ @_generative
+ def with_for_update(
+ self,
+ read=False,
+ nowait=False,
+ of=None,
+ skip_locked=False,
+ key_share=False,
+ ):
+ """return a new :class:`_query.Query`
+ with the specified options for the
+ ``FOR UPDATE`` clause.
+
+ The behavior of this method is identical to that of
+ :meth:`_expression.GenerativeSelect.with_for_update`.
+ When called with no arguments,
+ the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
+ appended. When additional arguments are specified, backend-specific
+ options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
+ can take effect.
+
+ E.g.::
+
+ q = sess.query(User).populate_existing().with_for_update(nowait=True, of=User)
+
+ The above query on a PostgreSQL backend will render like::
+
+ SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
+
+ .. warning::
+
+ Using ``with_for_update`` in the context of eager loading
+ relationships is not officially supported or recommended by
+ SQLAlchemy and may not work with certain queries on various
+ database backends. When ``with_for_update`` is successfully used
+ with a query that involves :func:`_orm.joinedload`, SQLAlchemy will
+ attempt to emit SQL that locks all involved tables.
+
+ .. note:: It is generally a good idea to combine the use of the
+ :meth:`_orm.Query.populate_existing` method when using the
+ :meth:`_orm.Query.with_for_update` method. The purpose of
+ :meth:`_orm.Query.populate_existing` is to force all the data read
+ from the SELECT to be populated into the ORM objects returned,
+ even if these objects are already in the :term:`identity map`.
+
+ .. seealso::
+
+ :meth:`_expression.GenerativeSelect.with_for_update`
+ - Core level method with
+ full argument and behavioral description.
+
+ :meth:`_orm.Query.populate_existing` - overwrites attributes of
+ objects already loaded in the identity map.
+
+ """ # noqa: E501
+
+ self._for_update_arg = ForUpdateArg(
+ read=read,
+ nowait=nowait,
+ of=of,
+ skip_locked=skip_locked,
+ key_share=key_share,
+ )
+
+ @_generative
+ def params(self, *args, **kwargs):
+ r"""Add values for bind parameters which may have been
+ specified in filter().
+
+ Parameters may be specified using \**kwargs, or optionally a single
+ dictionary as the first positional argument. The reason for both is
+ that \**kwargs is convenient; however, some parameter dictionaries
+ contain unicode keys, in which case \**kwargs cannot be used.
+
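+ E.g., a minimal sketch using a bind parameter established via
+ :func:`_expression.text`::
+
+ q = session.query(User).\
+ filter(text("user.name = :name")).\
+ params(name='ed')
+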
+ """
+ if len(args) == 1:
+ kwargs.update(args[0])
+ elif len(args) > 0:
+ raise sa_exc.ArgumentError(
+ "params() takes zero or one positional argument, "
+ "which is a dictionary."
+ )
+ self._params = self._params.union(kwargs)
+
+ def where(self, *criterion):
+ """A synonym for :meth:`.Query.filter`.
+
+ .. versionadded:: 1.4
+
+ """
+ return self.filter(*criterion)
+
+ @_generative
+ @_assertions(_no_statement_condition, _no_limit_offset)
+ def filter(self, *criterion):
+ r"""Apply the given filtering criterion to a copy
+ of this :class:`_query.Query`, using SQL expressions.
+
+ e.g.::
+
+ session.query(MyClass).filter(MyClass.name == 'some name')
+
+ Multiple criteria may be specified as comma separated; the effect
+ is that they will be joined together using the :func:`.and_`
+ function::
+
+ session.query(MyClass).\
+ filter(MyClass.name == 'some name', MyClass.id > 5)
+
+ The criterion is any SQL expression object applicable to the
+ WHERE clause of a select. String expressions are coerced
+ into SQL expression constructs via the :func:`_expression.text`
+ construct.
+
+ .. seealso::
+
+ :meth:`_query.Query.filter_by` - filter on keyword expressions.
+
+ """
+ for criterion in list(criterion):
+ criterion = coercions.expect(
+ roles.WhereHavingRole, criterion, apply_propagate_attrs=self
+ )
+
+ # legacy vvvvvvvvvvvvvvvvvvvvvvvvvvv
+ if self._aliased_generation:
+ criterion = sql_util._deep_annotate(
+ criterion, {"aliased_generation": self._aliased_generation}
+ )
+ # legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ self._where_criteria += (criterion,)
+
+ @util.memoized_property
+ def _last_joined_entity(self):
+ if self._legacy_setup_joins:
+ return _legacy_determine_last_joined_entity(
+ self._legacy_setup_joins, self._entity_from_pre_ent_zero()
+ )
+ else:
+ return None
+
+ def _filter_by_zero(self):
+ """for the filter_by() method, return the target entity for which
+ we will attempt to derive an expression from based on string name.
+
+ """
+
+ if self._legacy_setup_joins:
+ _last_joined_entity = self._last_joined_entity
+ if _last_joined_entity is not None:
+ return _last_joined_entity
+
+ # discussion related to #7239
+ # special check determines if we should try to derive attributes
+ # for filter_by() from the "from object", i.e., if the user
+ # called query.select_from(some selectable).filter_by(some_attr=value).
+ # We don't want to do that in the case that methods like
+ # from_self(), select_entity_from(), or a set op like union() were
+ # called; while these methods also place a
+ # selectable in the _from_obj collection, they also set up
+ # the _set_base_alias boolean which turns on the whole "adapt the
+ # entity to this selectable" thing, meaning the query still continues
+ # to construct itself in terms of the lead entity that was passed
+ # to query(), e.g. query(User).from_self() is still in terms of User,
+ # and not the subquery that from_self() created. This feature of
+ # "implicitly adapt all occurrences of entity X to some arbitrary
+ # subquery" is the main thing I am trying to do away with in 2.0 as
+ # users should now use aliased() for that, but I can't entirely get
+ # rid of it due to query.union() and other set ops relying upon it.
+ #
+ # compare this to the base Select()._filter_by_zero() which can
+ # just return self._from_obj[0] if present, because there is no
+ # "_set_base_alias" feature.
+ #
+ # IOW, this conditional essentially detects if
+ # "select_from(some_selectable)" has been called, as opposed to
+ # "select_entity_from()", "from_self()"
+ # or "union() / some_set_op()".
+ if self._from_obj and not self._compile_options._set_base_alias:
+ return self._from_obj[0]
+
+ return self._raw_columns[0]
+
+ def filter_by(self, **kwargs):
+ r"""Apply the given filtering criterion to a copy
+ of this :class:`_query.Query`, using keyword expressions.
+
+ e.g.::
+
+ session.query(MyClass).filter_by(name='some name')
+
+ Multiple criteria may be specified as comma separated; the effect
+ is that they will be joined together using the :func:`.and_`
+ function::
+
+ session.query(MyClass).\
+ filter_by(name='some name', id=5)
+
+ The keyword expressions are extracted from the primary
+ entity of the query, or the last entity that was the
+ target of a call to :meth:`_query.Query.join`.
+
+ .. seealso::
+
+ :meth:`_query.Query.filter` - filter on SQL expressions.
+
+ """
+ from_entity = self._filter_by_zero()
+ if from_entity is None:
+ raise sa_exc.InvalidRequestError(
+ "Can't use filter_by when the first entity '%s' of a query "
+ "is not a mapped class. Please use the filter method instead, "
+ "or change the order of the entities in the query"
+ % self._query_entity_zero()
+ )
+
+ clauses = [
+ _entity_namespace_key(from_entity, key) == value
+ for key, value in kwargs.items()
+ ]
+ return self.filter(*clauses)
+
+ @_generative
+ @_assertions(_no_statement_condition, _no_limit_offset)
+ def order_by(self, *clauses):
+ """Apply one or more ORDER BY criteria to the query and return
+ the newly resulting :class:`_query.Query`.
+
+ e.g.::
+
+ q = session.query(Entity).order_by(Entity.id, Entity.name)
+
+ All existing ORDER BY criteria may be cancelled by passing
+ ``None`` by itself. New ORDER BY criteria may then be added by
+ invoking :meth:`_orm.Query.order_by` again, e.g.::
+
+ # will erase all ORDER BY and then ORDER BY new_col alone
+ q = q.order_by(None).order_by(new_col)
+
+ .. seealso::
+
+ These sections describe ORDER BY in terms of :term:`2.0 style`
+ invocation but apply to :class:`_orm.Query` as well:
+
+ :ref:`tutorial_order_by` - in the :ref:`unified_tutorial`
+
+ :ref:`tutorial_order_by_label` - in the :ref:`unified_tutorial`
+
+ """
+
+ if len(clauses) == 1 and (clauses[0] is None or clauses[0] is False):
+ self._order_by_clauses = ()
+ else:
+ criterion = tuple(
+ coercions.expect(roles.OrderByRole, clause)
+ for clause in clauses
+ )
+ # legacy vvvvvvvvvvvvvvvvvvvvvvvvvvv
+ if self._aliased_generation:
+ criterion = tuple(
+ [
+ sql_util._deep_annotate(
+ o, {"aliased_generation": self._aliased_generation}
+ )
+ for o in criterion
+ ]
+ )
+ # legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ self._order_by_clauses += criterion
+
+ @_generative
+ @_assertions(_no_statement_condition, _no_limit_offset)
+ def group_by(self, *clauses):
+ """Apply one or more GROUP BY criterion to the query and return
+ the newly resulting :class:`_query.Query`.
+
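+ E.g., a minimal sketch assuming mapped ``User`` and ``Address``
+ classes linked by a ``User.addresses`` relationship::
+
+ q = session.query(User.id, func.count(Address.id)).\
+ join(User.addresses).\
+ group_by(User.id)
+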
+ All existing GROUP BY settings can be suppressed by
+ passing ``None`` - this will suppress any GROUP BY configured
+ on mappers as well.
+
+ .. seealso::
+
+ These sections describe GROUP BY in terms of :term:`2.0 style`
+ invocation but apply to :class:`_orm.Query` as well:
+
+ :ref:`tutorial_group_by_w_aggregates` - in the
+ :ref:`unified_tutorial`
+
+ :ref:`tutorial_order_by_label` - in the :ref:`unified_tutorial`
+
+ """
+
+ if len(clauses) == 1 and (clauses[0] is None or clauses[0] is False):
+ self._group_by_clauses = ()
+ else:
+ criterion = tuple(
+ coercions.expect(roles.GroupByRole, clause)
+ for clause in clauses
+ )
+ # legacy vvvvvvvvvvvvvvvvvvvvvvvvvvv
+ if self._aliased_generation:
+ criterion = tuple(
+ [
+ sql_util._deep_annotate(
+ o, {"aliased_generation": self._aliased_generation}
+ )
+ for o in criterion
+ ]
+ )
+ # legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ self._group_by_clauses += criterion
+
+ @_generative
+ @_assertions(_no_statement_condition, _no_limit_offset)
+ def having(self, criterion):
+ r"""Apply a HAVING criterion to the query and return the
+ newly resulting :class:`_query.Query`.
+
+ :meth:`_query.Query.having` is used in conjunction with
+ :meth:`_query.Query.group_by`.
+
+ A HAVING criterion makes it possible to use filters on aggregate
+ functions like COUNT, SUM, AVG, MAX, and MIN, e.g.::
+
+ q = session.query(User.id).\
+ join(User.addresses).\
+ group_by(User.id).\
+ having(func.count(Address.id) > 2)
+
+ """
+
+ self._having_criteria += (
+ coercions.expect(
+ roles.WhereHavingRole, criterion, apply_propagate_attrs=self
+ ),
+ )
+
+ def _set_op(self, expr_fn, *q):
+ return self._from_selectable(expr_fn(*([self] + list(q))).subquery())
+
+ def union(self, *q):
+ """Produce a UNION of this Query against one or more queries.
+
+ e.g.::
+
+ q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
+ q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
+
+ q3 = q1.union(q2)
+
+ The method accepts multiple Query objects so as to control
+ the level of nesting. A series of ``union()`` calls such as::
+
+ x.union(y).union(z).all()
+
+ will nest on each ``union()``, and produces::
+
+ SELECT * FROM (SELECT * FROM (SELECT * FROM x UNION
+ SELECT * FROM y) UNION SELECT * FROM z)
+
+ Whereas::
+
+ x.union(y, z).all()
+
+ produces::
+
+ SELECT * FROM (SELECT * FROM x UNION SELECT * FROM y UNION
+ SELECT * FROM z)
+
+ Note that many database backends do not allow ORDER BY to
+ be rendered on a query called within UNION, EXCEPT, etc.
+ To disable all ORDER BY clauses including those configured
+ on mappers, issue ``query.order_by(None)`` - the resulting
+ :class:`_query.Query` object will not render ORDER BY within
+ its SELECT statement.
+
+ """
+ return self._set_op(expression.union, *q)
+
+ def union_all(self, *q):
+ """Produce a UNION ALL of this Query against one or more queries.
+
+ Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
+ that method for usage examples.
+
+ """
+ return self._set_op(expression.union_all, *q)
+
+ def intersect(self, *q):
+ """Produce an INTERSECT of this Query against one or more queries.
+
+ Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
+ that method for usage examples.
+
+ """
+ return self._set_op(expression.intersect, *q)
+
+ def intersect_all(self, *q):
+ """Produce an INTERSECT ALL of this Query against one or more queries.
+
+ Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
+ that method for usage examples.
+
+ """
+ return self._set_op(expression.intersect_all, *q)
+
+ def except_(self, *q):
+ """Produce an EXCEPT of this Query against one or more queries.
+
+ Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
+ that method for usage examples.
+
+ """
+ return self._set_op(expression.except_, *q)
+
+ def except_all(self, *q):
+ """Produce an EXCEPT ALL of this Query against one or more queries.
+
+ Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
+ that method for usage examples.
+
+ """
+ return self._set_op(expression.except_all, *q)
+
+ def _next_aliased_generation(self):
+ if "_aliased_generation_counter" not in self.__dict__:
+ self._aliased_generation_counter = 0
+ self._aliased_generation_counter += 1
+ return self._aliased_generation_counter
+
+ @_generative
+ @_assertions(_no_statement_condition, _no_limit_offset)
+ def join(self, target, *props, **kwargs):
+ r"""Create a SQL JOIN against this :class:`_query.Query`
+ object's criterion
+ and apply generatively, returning the newly resulting
+ :class:`_query.Query`.
+
+ **Simple Relationship Joins**
+
+ Consider a mapping between two classes ``User`` and ``Address``,
+ with a relationship ``User.addresses`` representing a collection
+ of ``Address`` objects associated with each ``User``. The most
+ common usage of :meth:`_query.Query.join`
+ is to create a JOIN along this
+ relationship, using the ``User.addresses`` attribute as an indicator
+ for how this should occur::
+
+ q = session.query(User).join(User.addresses)
+
+ Where above, the call to :meth:`_query.Query.join` along
+ ``User.addresses`` will result in SQL approximately equivalent to::
+
+ SELECT user.id, user.name
+ FROM user JOIN address ON user.id = address.user_id
+
+ In the above example we refer to ``User.addresses`` as passed to
+ :meth:`_query.Query.join` as the "on clause", that is, it indicates
+ how the "ON" portion of the JOIN should be constructed.
+
+ To construct a chain of joins, multiple :meth:`_query.Query.join`
+ calls may be used. The relationship-bound attribute implies both
+ the left and right side of the join at once::
+
+ q = session.query(User).\
+ join(User.orders).\
+ join(Order.items).\
+ join(Item.keywords)
+
+ .. note:: as seen in the above example, **the order in which each
+ call to the join() method occurs is important**. Query would not,
+ for example, know how to join correctly if we were to specify
+ ``User``, then ``Item``, then ``Order``, in our chain of joins; in
+ such a case, depending on the arguments passed, it may raise an
+ error that it doesn't know how to join, or it may produce invalid
+ SQL in which case the database will raise an error. In correct
+ practice, the
+ :meth:`_query.Query.join` method is invoked in such a way that lines
+ up with how we would want the JOIN clauses in SQL to be
+ rendered, and each call should represent a clear link from what
+ precedes it.
+
+ **Joins to a Target Entity or Selectable**
+
+ A second form of :meth:`_query.Query.join` allows any mapped entity or
+ core selectable construct as a target. In this usage,
+ :meth:`_query.Query.join` will attempt to create a JOIN along the
+ natural foreign key relationship between two entities::
+
+ q = session.query(User).join(Address)
+
+ In the above calling form, :meth:`_query.Query.join` is called upon to
+ create the "on clause" automatically for us. This calling form will
+ ultimately raise an error if either there are no foreign keys between
+ the two entities, or if there are multiple foreign key linkages between
+ the target entity and the entity or entities already present on the
+ left side such that creating a join requires more information. Note
+ that when indicating a join to a target without any ON clause, ORM
+ configured relationships are not taken into account.
+
+ **Joins to a Target with an ON Clause**
+
+ The third calling form allows both the target entity as well
+ as the ON clause to be passed explicitly. An example that includes
+ a SQL expression as the ON clause is as follows::
+
+ q = session.query(User).join(Address, User.id==Address.user_id)
+
+ The above form may also use a relationship-bound attribute as the
+ ON clause as well::
+
+ q = session.query(User).join(Address, User.addresses)
+
+ The above syntax can be useful for the case where we wish
+ to join to an alias of a particular target entity. If we wanted
+ to join to ``Address`` twice, it could be achieved using two
+ aliases set up using the :func:`~sqlalchemy.orm.aliased` function::
+
+ a1 = aliased(Address)
+ a2 = aliased(Address)
+
+ q = session.query(User).\
+ join(a1, User.addresses).\
+ join(a2, User.addresses).\
+ filter(a1.email_address=='ed@foo.com').\
+ filter(a2.email_address=='ed@bar.com')
+
+ The relationship-bound calling form can also specify a target entity
+ using the :meth:`_orm.PropComparator.of_type` method; a query
+ equivalent to the one above would be::
+
+ a1 = aliased(Address)
+ a2 = aliased(Address)
+
+ q = session.query(User).\
+ join(User.addresses.of_type(a1)).\
+ join(User.addresses.of_type(a2)).\
+ filter(a1.email_address == 'ed@foo.com').\
+ filter(a2.email_address == 'ed@bar.com')
+
+ **Augmenting Built-in ON Clauses**
+
+ As a substitute for providing a full custom ON condition for an
+ existing relationship, the :meth:`_orm.PropComparator.and_` function
+ may be applied to a relationship attribute to augment additional
+ criteria into the ON clause; the additional criteria will be combined
+ with the default criteria using AND::
+
+ q = session.query(User).join(
+ User.addresses.and_(Address.email_address != 'foo@bar.com')
+ )
+
+ .. versionadded:: 1.4
+
+ **Joining to Tables and Subqueries**
+
+
+ The target of a join may also be any table or SELECT statement,
+ which may be related to a target entity or not. Use the
+ appropriate ``.subquery()`` method in order to make a subquery
+ out of a query::
+
+ subq = session.query(Address).\
+ filter(Address.email_address == 'ed@foo.com').\
+ subquery()
+
+
+ q = session.query(User).join(
+ subq, User.id == subq.c.user_id
+ )
+
+ Joining to a subquery in terms of a specific relationship and/or
+ target entity may be achieved by linking the subquery to the
+ entity using :func:`_orm.aliased`::
+
+ subq = session.query(Address).\
+ filter(Address.email_address == 'ed@foo.com').\
+ subquery()
+
+ address_subq = aliased(Address, subq)
+
+ q = session.query(User).join(
+ User.addresses.of_type(address_subq)
+ )
+
+
+ **Controlling what to Join From**
+
+ In cases where the left side of the current state of
+ :class:`_query.Query` is not in line with what we want to join from,
+ the :meth:`_query.Query.select_from` method may be used::
+
+ q = session.query(Address).select_from(User).\
+ join(User.addresses).\
+ filter(User.name == 'ed')
+
+ Which will produce SQL similar to::
+
+ SELECT address.* FROM user
+ JOIN address ON user.id=address.user_id
+ WHERE user.name = :name_1
+
+ **Legacy Features of Query.join()**
+
+ .. deprecated:: 1.4 The following features are deprecated and will
+ be removed in SQLAlchemy 2.0.
+
+ The :meth:`_query.Query.join` method currently supports several
+ usage patterns and arguments that are considered to be legacy
+ as of SQLAlchemy 1.3. A deprecation path will follow
+ in the 1.4 series for the following features:
+
+
+ * Joining on relationship names rather than attributes::
+
+ session.query(User).join("addresses")
+
+ **Why it's legacy**: the string name does not provide enough context
+ for :meth:`_query.Query.join` to always know what is desired,
+ notably in that there is no indication of what the left side
+ of the join should be. This gives rise to flags like
+ ``from_joinpoint`` as well as the ability to place several
+ join clauses in a single :meth:`_query.Query.join` call
+ which don't solve the problem fully while also
+ adding new calling styles that are unnecessary and expensive to
+ accommodate internally.
+
+ **Modern calling pattern**: Use the actual relationship,
+ e.g. ``User.addresses`` in the above case::
+
+ session.query(User).join(User.addresses)
+
+ * Automatic aliasing with the ``aliased=True`` flag::
+
+ session.query(Node).join(Node.children, aliased=True).\
+ filter(Node.name == 'some name')
+
+ **Why it's legacy**: the automatic aliasing feature of
+ :class:`_query.Query` is intensely complicated, both in its internal
+ implementation as well as in its observed behavior, and is almost
+ never used. It is difficult to know upon inspection where and when
+ its aliasing of a target entity, ``Node`` in the above case, will be
+ applied and when it won't, and additionally the feature has to use
+ very elaborate heuristics to achieve this implicit behavior.
+
+ **Modern calling pattern**: Use the :func:`_orm.aliased` construct
+ explicitly::
+
+ from sqlalchemy.orm import aliased
+
+ n1 = aliased(Node)
+
+ session.query(Node).join(Node.children.of_type(n1)).\
+ filter(n1.name == 'some name')
+
+ * Multiple joins in one call::
+
+ session.query(User).join("orders", "items")
+
+ session.query(User).join(User.orders, Order.items)
+
+ session.query(User).join(
+ (Order, User.orders),
+ (Item, Item.order_id == Order.id)
+ )
+
+ session.query(User).join(Order, Item)
+
+ # ... and several more forms actually
+
+ **Why it's legacy**: being able to chain multiple ON clauses in one
+ call to :meth:`_query.Query.join` is yet another attempt to solve
+ the problem of being able to specify what entity to join from,
+ and is the source of a large variety of potential calling patterns
+ that are internally expensive and complicated to parse and
+ accommodate.
+
+ **Modern calling pattern**: Use relationship-bound attributes
+ or SQL-oriented ON clauses within separate calls, so that
+ each call to :meth:`_query.Query.join` knows what the left
+ side should be::
+
+ session.query(User).join(User.orders).join(
+ Item, Item.order_id == Order.id)
+
+
+ :param \*props: Incoming arguments for :meth:`_query.Query.join`,
+ the props collection in modern use should be considered to be a one
+ or two argument form, either as a single "target" entity or ORM
+ attribute-bound relationship, or as a target entity plus an "on
+ clause" which may be a SQL expression or ORM attribute-bound
+ relationship.
+
+ :param isouter=False: If True, the join used will be a left outer join,
+ just as if the :meth:`_query.Query.outerjoin` method were called.
+
+ :param full=False: render FULL OUTER JOIN; implies ``isouter``.
+
+ .. versionadded:: 1.1
+
+ :param from_joinpoint=False: When using ``aliased=True``, a setting
+ of True here will cause the join to be from the most recent
+ joined target, rather than starting back from the original
+ FROM clauses of the query.
+
+ .. note:: This flag is considered legacy.
+
+ :param aliased=False: If True, indicate that the JOIN target should be
+ anonymously aliased. Subsequent calls to :meth:`_query.Query.filter`
+ and similar will adapt the incoming criterion to the target
+ alias, until :meth:`_query.Query.reset_joinpoint` is called.
+
+ .. note:: This flag is considered legacy.
+
+ .. seealso::
+
+ :ref:`ormtutorial_joins` in the ORM tutorial.
+
+ :ref:`inheritance_toplevel` for details on how
+ :meth:`_query.Query.join` is used for inheritance relationships.
+
+ :func:`_orm.join` - a standalone ORM-level join function,
+ used internally by :meth:`_query.Query.join`, which in previous
+ SQLAlchemy versions was the primary ORM-level joining interface.
+
+ """
+
+ aliased, from_joinpoint, isouter, full = (
+ kwargs.pop("aliased", False),
+ kwargs.pop("from_joinpoint", False),
+ kwargs.pop("isouter", False),
+ kwargs.pop("full", False),
+ )
+
+ if aliased or from_joinpoint:
+ util.warn_deprecated_20(
+ "The ``aliased`` and ``from_joinpoint`` keyword arguments "
+ "to Query.join() are deprecated and will be removed "
+ "in SQLAlchemy 2.0."
+ )
+
+ if kwargs:
+ raise TypeError(
+ "unknown arguments: %s" % ", ".join(sorted(kwargs))
+ )
+
+ # legacy vvvvvvvvvvvvvvvvvvvvvvvvvvv
+ if not from_joinpoint:
+ self._last_joined_entity = None
+ self._aliased_generation = None
+ # legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ if props:
+ onclause, legacy = props[0], props[1:]
+ else:
+ onclause = legacy = None
+
+ if not legacy and onclause is None and not isinstance(target, tuple):
+ # non legacy argument form
+ _props = [(target,)]
+ elif (
+ not legacy
+ and isinstance(
+ target,
+ (
+ expression.Selectable,
+ type,
+ AliasedClass,
+ types.FunctionType,
+ ),
+ )
+ and isinstance(
+ onclause,
+ (
+ elements.ColumnElement,
+ str,
+ interfaces.PropComparator,
+ types.FunctionType,
+ ),
+ )
+ ):
+ # non legacy argument form
+ _props = [(target, onclause)]
+ else:
+ # legacy forms. more time consuming :)
+ _props = []
+ _single = []
+ for prop in (target,) + props:
+ if isinstance(prop, tuple):
+ util.warn_deprecated_20(
+ "Query.join() will no longer accept tuples as "
+ "arguments in SQLAlchemy 2.0."
+ )
+ if _single:
+ _props.extend((_s,) for _s in _single)
+ _single = []
+
+ # this checks for an extremely ancient calling form of
+ # reversed tuples.
+ if isinstance(prop[0], (str, interfaces.PropComparator)):
+ prop = (prop[1], prop[0])
+
+ _props.append(prop)
+ else:
+ _single.append(prop)
+ if _single:
+ _props.extend((_s,) for _s in _single)
+
+ # legacy vvvvvvvvvvvvvvvvvvvvvvvvvvv
+ if aliased:
+ self._aliased_generation = self._next_aliased_generation()
+
+ if self._aliased_generation:
+ _props = [
+ (
+ prop[0],
+ sql_util._deep_annotate(
+ prop[1],
+ {"aliased_generation": self._aliased_generation},
+ )
+ if isinstance(prop[1], expression.ClauseElement)
+ else prop[1],
+ )
+ if len(prop) == 2
+ else prop
+ for prop in _props
+ ]
+
+ # legacy ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ joins_to_add = tuple(
+ (
+ coercions.expect(
+ roles.JoinTargetRole,
+ prop[0],
+ legacy=True,
+ apply_propagate_attrs=self,
+ ),
+ (
+ coercions.expect(roles.OnClauseRole, prop[1], legacy=True)
+ # if not isinstance(prop[1], str)
+ # else prop[1]
+ )
+ if len(prop) == 2
+ else None,
+ None,
+ {
+ "isouter": isouter,
+ "aliased": aliased,
+ "from_joinpoint": True if i > 0 else from_joinpoint,
+ "full": full,
+ "aliased_generation": self._aliased_generation,
+ },
+ )
+ for i, prop in enumerate(_props)
+ )
+
+ if len(joins_to_add) > 1:
+ util.warn_deprecated_20(
+ "Passing a chain of multiple join conditions to Query.join() "
+ "is deprecated and will be removed in SQLAlchemy 2.0. "
+ "Please use individual join() calls per relationship."
+ )
+
+ self._legacy_setup_joins += joins_to_add
+
+ self.__dict__.pop("_last_joined_entity", None)
+
+ def outerjoin(self, target, *props, **kwargs):
+ """Create a left outer join against this ``Query`` object's criterion
+ and apply generatively, returning the newly resulting ``Query``.
+
+ Usage is the same as the ``join()`` method.
+
+ """
+ kwargs["isouter"] = True
+ return self.join(target, *props, **kwargs)
+
+ @_generative
+ @_assertions(_no_statement_condition)
+ def reset_joinpoint(self):
+ """Return a new :class:`.Query`, where the "join point" has
+ been reset back to the base FROM entities of the query.
+
+ This method is usually used in conjunction with the
+ ``aliased=True`` feature of the :meth:`~.Query.join`
+ method. See the example in :meth:`~.Query.join` for how
+ this is used.
+
+ """
+ self._last_joined_entity = None
+ self._aliased_generation = None
+
+ @_generative
+ @_assertions(_no_clauseelement_condition)
+ def select_from(self, *from_obj):
+ r"""Set the FROM clause of this :class:`.Query` explicitly.
+
+ :meth:`.Query.select_from` is often used in conjunction with
+ :meth:`.Query.join` in order to control which entity is selected
+ from on the "left" side of the join.
+
+ The entity or selectable object here effectively replaces the
+ "left edge" of any calls to :meth:`~.Query.join`, when no
+ joinpoint is otherwise established - usually, the default "join
+ point" is the leftmost entity in the :class:`~.Query` object's
+ list of entities to be selected.
+
+ A typical example::
+
+ q = session.query(Address).select_from(User).\
+ join(User.addresses).\
+ filter(User.name == 'ed')
+
+ Which produces SQL equivalent to::
+
+ SELECT address.* FROM user
+ JOIN address ON user.id=address.user_id
+ WHERE user.name = :name_1
+
+ :param \*from_obj: collection of one or more entities to apply
+ to the FROM clause. Entities can be mapped classes,
+ :class:`.AliasedClass` objects, :class:`.Mapper` objects
+ as well as core :class:`.FromClause` elements like subqueries.
+
+ .. versionchanged:: 0.9
+ This method no longer applies the given FROM object
+ as the selectable from which matching entities
+ select; the :meth:`.select_entity_from` method
+ now accomplishes this. See that method for a description
+ of this behavior.
+
+ .. seealso::
+
+ :meth:`~.Query.join`
+
+ :meth:`.Query.select_entity_from`
+
+ """
+
+ self._set_select_from(from_obj, False)
+
+ @util.deprecated_20(
+ ":meth:`_orm.Query.select_entity_from`",
+ alternative="Use the :func:`_orm.aliased` construct instead",
+ )
+ @_generative
+ @_assertions(_no_clauseelement_condition)
+ def select_entity_from(self, from_obj):
+ r"""Set the FROM clause of this :class:`_query.Query` to a
+ core selectable, applying it as a replacement FROM clause
+ for corresponding mapped entities.
+
+ The :meth:`_query.Query.select_entity_from`
+ method supplies an alternative
+ approach to the use case of applying an :func:`.aliased` construct
+ explicitly throughout a query. Instead of referring to the
+ :func:`.aliased` construct explicitly,
+ :meth:`_query.Query.select_entity_from` automatically *adapts* all
+ occurrences of the entity to the target selectable.
+
+ Given a case for :func:`.aliased` such as selecting ``User``
+ objects from a SELECT statement::
+
+ select_stmt = select(User).where(User.id == 7)
+ user_alias = aliased(User, select_stmt)
+
+ q = session.query(user_alias).\
+ filter(user_alias.name == 'ed')
+
+ Above, we apply the ``user_alias`` object explicitly throughout the
+ query. When it's not feasible for ``user_alias`` to be referenced
+ explicitly in many places, :meth:`_query.Query.select_entity_from`
+ may be
+ used at the start of the query to adapt the existing ``User`` entity::
+
+ q = session.query(User).\
+ select_entity_from(select_stmt.subquery()).\
+ filter(User.name == 'ed')
+
+ Above, the generated SQL will show that the ``User`` entity is
+ adapted to our statement, even in the case of the WHERE clause:
+
+ .. sourcecode:: sql
+
+ SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
+ FROM (SELECT "user".id AS id, "user".name AS name
+ FROM "user"
+ WHERE "user".id = :id_1) AS anon_1
+ WHERE anon_1.name = :name_1
+
+ The :meth:`_query.Query.select_entity_from` method is similar to the
+ :meth:`_query.Query.select_from` method,
+ in that it sets the FROM clause
+ of the query. The difference is that it additionally applies
+ adaptation to the other parts of the query that refer to the
+ primary entity. If above we had used :meth:`_query.Query.select_from`
+ instead, the SQL generated would have been:
+
+ .. sourcecode:: sql
+
+ -- uses plain select_from(), not select_entity_from()
+ SELECT "user".id AS user_id, "user".name AS user_name
+ FROM "user", (SELECT "user".id AS id, "user".name AS name
+ FROM "user"
+ WHERE "user".id = :id_1) AS anon_1
+ WHERE "user".name = :name_1
+
+ To supply textual SQL to the :meth:`_query.Query.select_entity_from`
+ method,
+ we can make use of the :func:`_expression.text` construct. However,
+ the
+ :func:`_expression.text`
+ construct needs to be aligned with the columns of our
+ entity, which is achieved by making use of the
+ :meth:`_expression.TextClause.columns` method::
+
+ text_stmt = text("select id, name from user").columns(
+ User.id, User.name).subquery()
+ q = session.query(User).select_entity_from(text_stmt)
+
+ :meth:`_query.Query.select_entity_from` itself accepts an
+ :func:`.aliased`
+ object, so that the special options of :func:`.aliased` such as
+ :paramref:`.aliased.adapt_on_names` may be used within the
+ scope of the :meth:`_query.Query.select_entity_from`
+ method's adaptation
+ services. Suppose
+ a view ``user_view`` also returns rows from ``user``. If
+ we reflect this view into a :class:`_schema.Table`, this view has no
+ relationship to the :class:`_schema.Table` to which we are mapped;
+ however,
+ we can use name matching to select from it::
+
+ user_view = Table('user_view', metadata,
+ autoload_with=engine)
+ user_view_alias = aliased(
+ User, user_view, adapt_on_names=True)
+ q = session.query(User).\
+ select_entity_from(user_view_alias).\
+ order_by(User.name)
+
+ .. versionchanged:: 1.1.7 The :meth:`_query.Query.select_entity_from`
+ method now accepts an :func:`.aliased` object as an alternative
+ to a :class:`_expression.FromClause` object.
+
+ :param from_obj: a :class:`_expression.FromClause`
+ object that will replace
+ the FROM clause of this :class:`_query.Query`.
+ It also may be an instance
+ of :func:`.aliased`.
+
+
+
+ .. seealso::
+
+ :meth:`_query.Query.select_from`
+
+ """
+
+ self._set_select_from([from_obj], True)
+ self._compile_options += {"_enable_single_crit": False}
+
+ def __getitem__(self, item):
+ return orm_util._getitem(
+ self,
+ item,
+ allow_negative=not self.session or not self.session.future,
+ )
+
+ @_generative
+ @_assertions(_no_statement_condition)
+ def slice(self, start, stop):
+ """Computes the "slice" of the :class:`_query.Query` represented by
+ the given indices and returns the resulting :class:`_query.Query`.
+
+ The start and stop indices behave like the arguments to Python's
+ built-in :func:`range` function. This method provides an
+ alternative to using ``LIMIT``/``OFFSET`` to get a slice of the
+ query.
+
+ For example::
+
+ session.query(User).order_by(User.id).slice(1, 3)
+
+ renders as
+
+ .. sourcecode:: sql
+
+ SELECT users.id AS users_id,
+ users.name AS users_name
+ FROM users ORDER BY users.id
+ LIMIT ? OFFSET ?
+ (2, 1)
+
+ .. seealso::
+
+ :meth:`_query.Query.limit`
+
+ :meth:`_query.Query.offset`
+
+ """
+
+ self._limit_clause, self._offset_clause = sql_util._make_slice(
+ self._limit_clause, self._offset_clause, start, stop
+ )
+
+ @_generative
+ @_assertions(_no_statement_condition)
+ def limit(self, limit):
+ """Apply a ``LIMIT`` to the query and return the newly resulting
+ ``Query``.
+
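+ E.g., a minimal sketch assuming a mapped ``User`` class::
+
+ q = session.query(User).order_by(User.id).limit(10)
+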
+ """
+ self._limit_clause = sql_util._offset_or_limit_clause(limit)
+
+ @_generative
+ @_assertions(_no_statement_condition)
+ def offset(self, offset):
+ """Apply an ``OFFSET`` to the query and return the newly resulting
+ ``Query``.
+
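+ E.g., a minimal sketch assuming a mapped ``User`` class, combining
+ ``OFFSET`` with ``LIMIT`` as is typical for paging::
+
+ q = session.query(User).order_by(User.id).limit(10).offset(20)
+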
+ """
+ self._offset_clause = sql_util._offset_or_limit_clause(offset)
+
+ @_generative
+ @_assertions(_no_statement_condition)
+ def distinct(self, *expr):
+ r"""Apply a ``DISTINCT`` to the query and return the newly resulting
+ ``Query``.
+
+
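+ E.g., a minimal sketch assuming a mapped ``User`` class::
+
+ q = session.query(User.name).distinct()
+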
+ .. note::
+
+ The ORM-level :meth:`.distinct` call includes logic that will
+ automatically add columns from the ORDER BY of the query to the
+ columns clause of the SELECT statement, to satisfy the common need
+ of the database backend that ORDER BY columns be part of the SELECT
+ list when DISTINCT is used. These columns *are not* added to the
+ list of columns actually fetched by the :class:`_query.Query`,
+ however,
+ so would not affect results. The columns are passed through when
+ using the :attr:`_query.Query.statement` accessor, however.
+
+ .. deprecated:: 2.0 This logic is deprecated and will be removed
+ in SQLAlchemy 2.0. See :ref:`migration_20_query_distinct`
+ for a description of this use case in 2.0.
+
+ :param \*expr: optional column expressions. When present,
+ the PostgreSQL dialect will render a ``DISTINCT ON (<expressions>)``
+ construct.
+
+ .. deprecated:: 1.4 Using \*expr in other dialects is deprecated
+ and will raise :class:`_exc.CompileError` in a future version.
+
+ """
+ if expr:
+ self._distinct = True
+ self._distinct_on = self._distinct_on + tuple(
+ coercions.expect(roles.ByOfRole, e) for e in expr
+ )
+ else:
+ self._distinct = True
+
+ def all(self):
+ """Return the results represented by this :class:`_query.Query`
+ as a list.
+
+ This results in an execution of the underlying SQL statement.
+
+ .. warning:: The :class:`_query.Query` object,
+ when asked to return either
+ a sequence or iterator that consists of full ORM-mapped entities,
+ will **deduplicate entries based on primary key**. See the FAQ for
+ more details.
+
+ .. seealso::
+
+ :ref:`faq_query_deduplicating`
+ """
+ return self._iter().all()
+
+ @_generative
+ @_assertions(_no_clauseelement_condition)
+ def from_statement(self, statement):
+ """Execute the given SELECT statement and return results.
+
+ This method bypasses all internal statement compilation, and the
+ statement is executed without modification.
+
+ The statement is typically either a :func:`_expression.text`
+ or :func:`_expression.select` construct, and should return the set
+ of columns
+ appropriate to the entity class represented by this
+ :class:`_query.Query`.
+
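+ E.g., a minimal sketch using :func:`_expression.text`, assuming a
+ mapped ``User`` class backed by a ``user`` table::
+
+ stmt = text("SELECT * FROM user WHERE user.name = :name")
+
+ q = session.query(User).from_statement(stmt).params(name='ed')
+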
+ .. seealso::
+
+ :ref:`orm_tutorial_literal_sql` - usage examples in the
+ ORM tutorial
+
+ """
+ statement = coercions.expect(
+ roles.SelectStatementRole, statement, apply_propagate_attrs=self
+ )
+ self._statement = statement
+
+ def first(self):
+ """Return the first result of this ``Query`` or
+ ``None`` if the result doesn't contain any row.
+
+ first() applies a limit of one within the generated SQL, so that
+ only one primary entity row is generated on the server side
+ (note this may consist of multiple result rows if join-loaded
+ collections are present).
+
+ Calling :meth:`_query.Query.first`
+ results in an execution of the underlying
+ query.
+
+ .. seealso::
+
+ :meth:`_query.Query.one`
+
+ :meth:`_query.Query.one_or_none`
+
+ """
+ # replicates limit(1) behavior
+ if self._statement is not None:
+ return self._iter().first()
+ else:
+ return self.limit(1)._iter().first()
+
+ def one_or_none(self):
+ """Return at most one result or raise an exception.
+
+ Returns ``None`` if the query selects
+ no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
+ if multiple object identities are returned, or if multiple
+ rows are returned for a query that returns only scalar values
+ as opposed to full identity-mapped entities.
+
+ Calling :meth:`_query.Query.one_or_none`
+ results in an execution of the
+ underlying query.
+
+ .. versionadded:: 1.0.9
+
+ Added :meth:`_query.Query.one_or_none`
+
+ .. seealso::
+
+ :meth:`_query.Query.first`
+
+ :meth:`_query.Query.one`
+
+ """
+ return self._iter().one_or_none()
+
+ def one(self):
+ """Return exactly one result or raise an exception.
+
+ Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
+ no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
+ if multiple object identities are returned, or if multiple
+ rows are returned for a query that returns only scalar values
+ as opposed to full identity-mapped entities.
+
+ Calling :meth:`.one` results in an execution of the underlying query.
+
+ .. seealso::
+
+ :meth:`_query.Query.first`
+
+ :meth:`_query.Query.one_or_none`
+
+ """
+ return self._iter().one()
+
+ def scalar(self):
+ """Return the first element of the first result or None
+ if no rows present. If multiple rows are returned,
+ raises MultipleResultsFound.
+
+ >>> session.query(Item).scalar()
+ <Item>
+ >>> session.query(Item.id).scalar()
+ 1
+ >>> session.query(Item.id).filter(Item.id < 0).scalar()
+ None
+ >>> session.query(Item.id, Item.name).scalar()
+ 1
+ >>> session.query(func.count(Parent.id)).scalar()
+ 20
+
+ This results in an execution of the underlying query.
+
+ """
+ # TODO: not sure why we can't use result.scalar() here
+ try:
+ ret = self.one()
+ if not isinstance(ret, collections_abc.Sequence):
+ return ret
+ return ret[0]
+ except orm_exc.NoResultFound:
+ return None
+
+ def __iter__(self):
+ return self._iter().__iter__()
+
+ def _iter(self):
+ # new style execution.
+ params = self._params
+
+ statement = self._statement_20()
+ result = self.session.execute(
+ statement,
+ params,
+ execution_options={"_sa_orm_load_options": self.load_options},
+ )
+
+ # legacy: automatically set scalars, unique
+ if result._attributes.get("is_single_entity", False):
+ result = result.scalars()
+
+ if (
+ result._attributes.get("filtered", False)
+ and not self.load_options._yield_per
+ ):
+ result = result.unique()
+
+ return result
+
+ def __str__(self):
+ statement = self._statement_20()
+
+ try:
+ bind = (
+ self._get_bind_args(statement, self.session.get_bind)
+ if self.session
+ else None
+ )
+ except sa_exc.UnboundExecutionError:
+ bind = None
+
+ return str(statement.compile(bind))
+
+ def _get_bind_args(self, statement, fn, **kw):
+ return fn(clause=statement, **kw)
+
+ @property
+ def column_descriptions(self):
+ """Return metadata about the columns which would be
+ returned by this :class:`_query.Query`.
+
+ Format is a list of dictionaries::
+
+ user_alias = aliased(User, name='user2')
+ q = sess.query(User, User.id, user_alias)
+
+ # this expression:
+ q.column_descriptions
+
+ # would return:
+ [
+ {
+ 'name':'User',
+ 'type':User,
+ 'aliased':False,
+ 'expr':User,
+ 'entity': User
+ },
+ {
+ 'name':'id',
+ 'type':Integer(),
+ 'aliased':False,
+ 'expr':User.id,
+ 'entity': User
+ },
+ {
+ 'name':'user2',
+ 'type':User,
+ 'aliased':True,
+ 'expr':user_alias,
+ 'entity': user_alias
+ }
+ ]
+
+ .. seealso::
+
+ This API is available using :term:`2.0 style` queries as well,
+ documented at:
+
+ * :ref:`queryguide_inspection`
+
+ * :attr:`.Select.column_descriptions`
+
+ """
+
+ return _column_descriptions(self, legacy=True)
+
+ def instances(self, result_proxy, context=None):
+ """Return an ORM result given a :class:`_engine.CursorResult` and
+ :class:`.QueryContext`.
+
+ """
+ if context is None:
+ util.warn_deprecated(
+ "Using the Query.instances() method without a context "
+ "is deprecated and will be disallowed in a future release. "
+ "Please make use of :meth:`_query.Query.from_statement` "
+ "for linking ORM results to arbitrary select constructs.",
+ version="1.4",
+ )
+ compile_state = self._compile_state(for_statement=False)
+
+ context = QueryContext(
+ compile_state,
+ compile_state.statement,
+ self._params,
+ self.session,
+ self.load_options,
+ )
+
+ result = loading.instances(result_proxy, context)
+
+ # legacy: automatically set scalars, unique
+ if result._attributes.get("is_single_entity", False):
+ result = result.scalars()
+
+ if result._attributes.get("filtered", False):
+ result = result.unique()
+
+ return result
+
+ @util.deprecated_20(
+ ":meth:`_orm.Query.merge_result`",
+ alternative="The method is superseded by the "
+ ":func:`_orm.merge_frozen_result` function.",
+ becomes_legacy=True,
+ enable_warnings=False, # warnings occur via loading.merge_result
+ )
+ def merge_result(self, iterator, load=True):
+ """Merge a result into this :class:`_query.Query` object's Session.
+
+ Given an iterator returned by a :class:`_query.Query`
+ of the same structure
+ as this one, return an identical iterator of results, with all mapped
+ instances merged into the session using :meth:`.Session.merge`. This
+ is an optimized method which will merge all mapped instances,
+ preserving the structure of the result rows and unmapped columns with
+ less method overhead than that of calling :meth:`.Session.merge`
+ explicitly for each value.
+
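+        As a hedged sketch, assuming ``q`` is a :class:`_query.Query`
+        and ``cached_rows`` is a previously retrieved sequence of rows
+        of the same structure (both names are illustrative)::
+
+            merged_rows = q.merge_result(cached_rows, load=False)
+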
+ The structure of the results is determined based on the column list of
+ this :class:`_query.Query` - if these do not correspond,
+ unchecked errors
+ will occur.
+
+ The 'load' argument is the same as that of :meth:`.Session.merge`.
+
+ For an example of how :meth:`_query.Query.merge_result` is used, see
+ the source code for the example :ref:`examples_caching`, where
+ :meth:`_query.Query.merge_result` is used to efficiently restore state
+ from a cache back into a target :class:`.Session`.
+
+ """
+
+ return loading.merge_result(self, iterator, load)
+
+ def exists(self):
+ """A convenience method that turns a query into an EXISTS subquery
+ of the form EXISTS (SELECT 1 FROM ... WHERE ...).
+
+ e.g.::
+
+ q = session.query(User).filter(User.name == 'fred')
+ session.query(q.exists())
+
+ Producing SQL similar to::
+
+ SELECT EXISTS (
+ SELECT 1 FROM users WHERE users.name = :name_1
+ ) AS anon_1
+
+ The EXISTS construct is usually used in the WHERE clause::
+
+ session.query(User.id).filter(q.exists()).scalar()
+
+ Note that some databases such as SQL Server don't allow an
+ EXISTS expression to be present in the columns clause of a
+ SELECT. To select a simple boolean value based on the exists
+ as a WHERE, use :func:`.literal`::
+
+ from sqlalchemy import literal
+
+ session.query(literal(True)).filter(q.exists()).scalar()
+
+ """
+
+ # .add_columns() for the case that we are a query().select_from(X),
+ # so that ".statement" can be produced (#2995) but also without
+ # omitting the FROM clause from a query(X) (#2818);
+ # .with_only_columns() after we have a core select() so that
+ # we get just "SELECT 1" without any entities.
+
+ inner = (
+ self.enable_eagerloads(False)
+ .add_columns(sql.literal_column("1"))
+ .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
+ .statement.with_only_columns(1)
+ )
+
+ ezero = self._entity_from_pre_ent_zero()
+ if ezero is not None:
+ inner = inner.select_from(ezero)
+
+ return sql.exists(inner)
+
+ def count(self):
+ r"""Return a count of rows this the SQL formed by this :class:`Query`
+ would return.
+
+ This generates the SQL for this Query as follows::
+
+ SELECT count(1) AS count_1 FROM (
+ SELECT <rest of query follows...>
+ ) AS anon_1
+
+ The above SQL returns a single row, which is the aggregate value
+ of the count function; the :meth:`_query.Query.count`
+ method then returns
+ that single integer value.
+
+ .. warning::
+
+ It is important to note that the value returned by
+ count() is **not the same as the number of ORM objects that this
+ Query would return from a method such as the .all() method**.
+ The :class:`_query.Query` object,
+ when asked to return full entities,
+ will **deduplicate entries based on primary key**, meaning if the
+ same primary key value would appear in the results more than once,
+ only one object of that primary key would be present. This does
+ not apply to a query that is against individual columns.
+
+ .. seealso::
+
+ :ref:`faq_query_deduplicating`
+
+ :ref:`orm_tutorial_query_returning`
+
+        For fine-grained control over specific columns to count, to skip the
+        usage of a subquery or to otherwise control the FROM clause, or to use
+        other aggregate functions, use :attr:`~sqlalchemy.sql.expression.func`
+        expressions in conjunction with :meth:`~.Session.query`, i.e.::
+
+ from sqlalchemy import func
+
+ # count User records, without
+ # using a subquery.
+ session.query(func.count(User.id))
+
+ # return count of user "id" grouped
+ # by "name"
+ session.query(func.count(User.id)).\
+ group_by(User.name)
+
+ from sqlalchemy import distinct
+
+ # count distinct "name" values
+ session.query(func.count(distinct(User.name)))
+
+ """
+ col = sql.func.count(sql.literal_column("*"))
+ return self._from_self(col).enable_eagerloads(False).scalar()
+
+ def delete(self, synchronize_session="evaluate"):
+ r"""Perform a DELETE with an arbitrary WHERE clause.
+
+ Deletes rows matched by this query from the database.
+
+ E.g.::
+
+ sess.query(User).filter(User.age == 25).\
+ delete(synchronize_session=False)
+
+ sess.query(User).filter(User.age == 25).\
+ delete(synchronize_session='evaluate')
+
+ .. warning::
+
+ See the section :ref:`orm_expression_update_delete` for important
+ caveats and warnings, including limitations when using bulk UPDATE
+ and DELETE with mapper inheritance configurations.
+
+ :param synchronize_session: chooses the strategy to update the
+ attributes on objects in the session. See the section
+ :ref:`orm_expression_update_delete` for a discussion of these
+ strategies.
+
+ :return: the count of rows matched as returned by the database's
+ "row count" feature.
+
+ .. seealso::
+
+ :ref:`orm_expression_update_delete`
+
+ """
+
+ bulk_del = BulkDelete(self)
+ if self.dispatch.before_compile_delete:
+ for fn in self.dispatch.before_compile_delete:
+ new_query = fn(bulk_del.query, bulk_del)
+ if new_query is not None:
+ bulk_del.query = new_query
+
+ self = bulk_del.query
+
+ delete_ = sql.delete(*self._raw_columns)
+ delete_._where_criteria = self._where_criteria
+ result = self.session.execute(
+ delete_,
+ self._params,
+ execution_options={"synchronize_session": synchronize_session},
+ )
+ bulk_del.result = result
+ self.session.dispatch.after_bulk_delete(bulk_del)
+ result.close()
+
+ return result.rowcount
+
+ def update(self, values, synchronize_session="evaluate", update_args=None):
+ r"""Perform an UPDATE with an arbitrary WHERE clause.
+
+ Updates rows matched by this query in the database.
+
+ E.g.::
+
+ sess.query(User).filter(User.age == 25).\
+ update({User.age: User.age - 10}, synchronize_session=False)
+
+ sess.query(User).filter(User.age == 25).\
+ update({"age": User.age - 10}, synchronize_session='evaluate')
+
+ .. warning::
+
+ See the section :ref:`orm_expression_update_delete` for important
+ caveats and warnings, including limitations when using arbitrary
+ UPDATE and DELETE with mapper inheritance configurations.
+
+ :param values: a dictionary with attributes names, or alternatively
+ mapped attributes or SQL expressions, as keys, and literal
+ values or sql expressions as values. If :ref:`parameter-ordered
+ mode <tutorial_parameter_ordered_updates>` is desired, the values can
+ be passed as a list of 2-tuples; this requires that the
+ :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
+ flag is passed to the :paramref:`.Query.update.update_args` dictionary
+ as well.
+
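+          E.g., a sketch of parameter-ordered mode (``User`` with
+          hypothetical ``x`` and ``y`` columns)::
+
+              session.query(User).filter(User.id == 5).update(
+                  [(User.x, 5), (User.y, User.x + 1)],
+                  update_args={"preserve_parameter_order": True},
+              )
+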
+ :param synchronize_session: chooses the strategy to update the
+ attributes on objects in the session. See the section
+ :ref:`orm_expression_update_delete` for a discussion of these
+ strategies.
+
+ :param update_args: Optional dictionary, if present will be passed
+ to the underlying :func:`_expression.update`
+ construct as the ``**kw`` for
+ the object. May be used to pass dialect-specific arguments such
+ as ``mysql_limit``, as well as other special arguments such as
+ :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`.
+
+ :return: the count of rows matched as returned by the database's
+ "row count" feature.
+
+
+ .. seealso::
+
+ :ref:`orm_expression_update_delete`
+
+
+ """
+
+ update_args = update_args or {}
+
+ bulk_ud = BulkUpdate(self, values, update_args)
+
+ if self.dispatch.before_compile_update:
+ for fn in self.dispatch.before_compile_update:
+ new_query = fn(bulk_ud.query, bulk_ud)
+ if new_query is not None:
+ bulk_ud.query = new_query
+ self = bulk_ud.query
+
+ upd = sql.update(*self._raw_columns)
+
+ ppo = update_args.pop("preserve_parameter_order", False)
+ if ppo:
+ upd = upd.ordered_values(*values)
+ else:
+ upd = upd.values(values)
+ if update_args:
+ upd = upd.with_dialect_options(**update_args)
+
+ upd._where_criteria = self._where_criteria
+ result = self.session.execute(
+ upd,
+ self._params,
+ execution_options={"synchronize_session": synchronize_session},
+ )
+ bulk_ud.result = result
+ self.session.dispatch.after_bulk_update(bulk_ud)
+ result.close()
+ return result.rowcount
+
+ def _compile_state(self, for_statement=False, **kw):
+ """Create an out-of-compiler ORMCompileState object.
+
+ The ORMCompileState object is normally created directly as a result
+ of the SQLCompiler.process() method being handed a Select()
+ or FromStatement() object that uses the "orm" plugin. This method
+ provides a means of creating this ORMCompileState object directly
+ without using the compiler.
+
+ This method is used only for deprecated cases, which include
+ the .from_self() method for a Query that has multiple levels
+ of .from_self() in use, as well as the instances() method. It is
+ also used within the test suite to generate ORMCompileState objects
+ for test purposes.
+
+ """
+
+ stmt = self._statement_20(for_statement=for_statement, **kw)
+ assert for_statement == stmt._compile_options._for_statement
+
+ # this chooses between ORMFromStatementCompileState and
+ # ORMSelectCompileState. We could also base this on
+ # query._statement is not None as we have the ORM Query here
+ # however this is the more general path.
+ compile_state_cls = ORMCompileState._get_plugin_class_for_plugin(
+ stmt, "orm"
+ )
+
+ return compile_state_cls.create_for_statement(stmt, None)
+
+ def _compile_context(self, for_statement=False):
+ compile_state = self._compile_state(for_statement=for_statement)
+ context = QueryContext(
+ compile_state,
+ compile_state.statement,
+ self._params,
+ self.session,
+ self.load_options,
+ )
+
+ return context
+
+
+class FromStatement(GroupedElement, SelectBase, Executable):
+ """Core construct that represents a load of ORM objects from a finished
+ select or text construct.
+
+ """
+
+ __visit_name__ = "orm_from_statement"
+
+ _compile_options = ORMFromStatementCompileState.default_compile_options
+
+ _compile_state_factory = ORMFromStatementCompileState.create_for_statement
+
+ _for_update_arg = None
+
+ _traverse_internals = [
+ ("_raw_columns", InternalTraversal.dp_clauseelement_list),
+ ("element", InternalTraversal.dp_clauseelement),
+ ] + Executable._executable_traverse_internals
+
+ _cache_key_traversal = _traverse_internals + [
+ ("_compile_options", InternalTraversal.dp_has_cache_key)
+ ]
+
+ def __init__(self, entities, element):
+ self._raw_columns = [
+ coercions.expect(
+ roles.ColumnsClauseRole,
+ ent,
+ apply_propagate_attrs=self,
+ post_inspect=True,
+ )
+ for ent in util.to_list(entities)
+ ]
+ self.element = element
+
+ def get_label_style(self):
+ return self._label_style
+
+ def set_label_style(self, label_style):
+ return SelectStatementGrouping(
+ self.element.set_label_style(label_style)
+ )
+
+ @property
+ def _label_style(self):
+ return self.element._label_style
+
+    def _compiler_dispatch(self, compiler, **kw):
+        """provide a fixed _compiler_dispatch method.
+
+ This is roughly similar to using the sqlalchemy.ext.compiler
+ ``@compiles`` extension.
+
+ """
+
+ compile_state = self._compile_state_factory(self, compiler, **kw)
+
+ toplevel = not compiler.stack
+
+ if toplevel:
+ compiler.compile_state = compile_state
+
+ return compiler.process(compile_state.statement, **kw)
+
+ def _ensure_disambiguated_names(self):
+ return self
+
+ def get_children(self, **kw):
+ for elem in itertools.chain.from_iterable(
+ element._from_objects for element in self._raw_columns
+ ):
+ yield elem
+ for elem in super(FromStatement, self).get_children(**kw):
+ yield elem
+
+ @property
+ def _returning(self):
+ return self.element._returning if self.element.is_dml else None
+
+ @property
+ def _inline(self):
+ return self.element._inline if self.element.is_dml else None
+
+
+class AliasOption(interfaces.LoaderOption):
+ @util.deprecated(
+ "1.4",
+ "The :class:`.AliasOption` is not necessary "
+ "for entities to be matched up to a query that is established "
+ "via :meth:`.Query.from_statement` and now does nothing.",
+ )
+ def __init__(self, alias):
+ r"""Return a :class:`.MapperOption` that will indicate to the
+ :class:`_query.Query`
+ that the main table has been aliased.
+
+ """
+
+ inherit_cache = False
+
+ def process_compile_state(self, compile_state):
+ pass
+
+
+class BulkUD(object):
+ """State used for the orm.Query version of update() / delete().
+
+ This object is now specific to Query only.
+
+ """
+
+ def __init__(self, query):
+ self.query = query.enable_eagerloads(False)
+ self._validate_query_state()
+ self.mapper = self.query._entity_from_pre_ent_zero()
+
+ def _validate_query_state(self):
+ for attr, methname, notset, op in (
+ ("_limit_clause", "limit()", None, operator.is_),
+ ("_offset_clause", "offset()", None, operator.is_),
+ ("_order_by_clauses", "order_by()", (), operator.eq),
+ ("_group_by_clauses", "group_by()", (), operator.eq),
+ ("_distinct", "distinct()", False, operator.is_),
+ (
+ "_from_obj",
+ "join(), outerjoin(), select_from(), or from_self()",
+ (),
+ operator.eq,
+ ),
+ (
+ "_legacy_setup_joins",
+ "join(), outerjoin(), select_from(), or from_self()",
+ (),
+ operator.eq,
+ ),
+ ):
+ if not op(getattr(self.query, attr), notset):
+ raise sa_exc.InvalidRequestError(
+ "Can't call Query.update() or Query.delete() "
+ "when %s has been called" % (methname,)
+ )
+
+ @property
+ def session(self):
+ return self.query.session
+
+
+class BulkUpdate(BulkUD):
+ """BulkUD which handles UPDATEs."""
+
+ def __init__(self, query, values, update_kwargs):
+ super(BulkUpdate, self).__init__(query)
+ self.values = values
+ self.update_kwargs = update_kwargs
+
+
+class BulkDelete(BulkUD):
+ """BulkUD which handles DELETEs."""
diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py
new file mode 100644
index 0000000..b51ea0e
--- /dev/null
+++ b/lib/sqlalchemy/orm/relationships.py
@@ -0,0 +1,3684 @@
+# orm/relationships.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""Heuristics related to join conditions as used in
+:func:`_orm.relationship`.
+
+Provides the :class:`.JoinCondition` object, which encapsulates
+SQL annotation and aliasing behavior focused on the `primaryjoin`
+and `secondaryjoin` aspects of :func:`_orm.relationship`.
+
+"""
+from __future__ import absolute_import
+
+import collections
+import re
+import weakref
+
+from . import attributes
+from .base import _is_mapped_class
+from .base import state_str
+from .interfaces import MANYTOMANY
+from .interfaces import MANYTOONE
+from .interfaces import ONETOMANY
+from .interfaces import PropComparator
+from .interfaces import StrategizedProperty
+from .util import _orm_annotate
+from .util import _orm_deannotate
+from .util import CascadeOptions
+from .. import exc as sa_exc
+from .. import log
+from .. import schema
+from .. import sql
+from .. import util
+from ..inspection import inspect
+from ..sql import coercions
+from ..sql import expression
+from ..sql import operators
+from ..sql import roles
+from ..sql import visitors
+from ..sql.util import _deep_deannotate
+from ..sql.util import _shallow_annotate
+from ..sql.util import adapt_criterion_to_null
+from ..sql.util import ClauseAdapter
+from ..sql.util import join_condition
+from ..sql.util import selectables_overlap
+from ..sql.util import visit_binary_product
+
+
+def remote(expr):
+ """Annotate a portion of a primaryjoin expression
+ with a 'remote' annotation.
+
+ See the section :ref:`relationship_custom_foreign` for a
+ description of use.
+
+ .. seealso::
+
+ :ref:`relationship_custom_foreign`
+
+ :func:`.foreign`
+
+ """
+ return _annotate_columns(
+ coercions.expect(roles.ColumnArgumentRole, expr), {"remote": True}
+ )
+
+
+def foreign(expr):
+ """Annotate a portion of a primaryjoin expression
+ with a 'foreign' annotation.
+
+ See the section :ref:`relationship_custom_foreign` for a
+ description of use.
+
+ .. seealso::
+
+ :ref:`relationship_custom_foreign`
+
+ :func:`.remote`
+
+ """
+
+ return _annotate_columns(
+ coercions.expect(roles.ColumnArgumentRole, expr), {"foreign": True}
+ )
+
+
+@log.class_logger
+class RelationshipProperty(StrategizedProperty):
+ """Describes an object property that holds a single item or list
+ of items that correspond to a related database table.
+
+ Public constructor is the :func:`_orm.relationship` function.
+
+ .. seealso::
+
+ :ref:`relationship_config_toplevel`
+
+ """
+
+ strategy_wildcard_key = "relationship"
+ inherit_cache = True
+
+ _links_to_entity = True
+
+ _persistence_only = dict(
+ passive_deletes=False,
+ passive_updates=True,
+ enable_typechecks=True,
+ active_history=False,
+ cascade_backrefs=True,
+ )
+
+ _dependency_processor = None
+
+ def __init__(
+ self,
+ argument,
+ secondary=None,
+ primaryjoin=None,
+ secondaryjoin=None,
+ foreign_keys=None,
+ uselist=None,
+ order_by=False,
+ backref=None,
+ back_populates=None,
+ overlaps=None,
+ post_update=False,
+ cascade=False,
+ viewonly=False,
+ lazy="select",
+ collection_class=None,
+ passive_deletes=_persistence_only["passive_deletes"],
+ passive_updates=_persistence_only["passive_updates"],
+ remote_side=None,
+ enable_typechecks=_persistence_only["enable_typechecks"],
+ join_depth=None,
+ comparator_factory=None,
+ single_parent=False,
+ innerjoin=False,
+ distinct_target_key=None,
+ doc=None,
+ active_history=_persistence_only["active_history"],
+ cascade_backrefs=_persistence_only["cascade_backrefs"],
+ load_on_pending=False,
+ bake_queries=True,
+ _local_remote_pairs=None,
+ query_class=None,
+ info=None,
+ omit_join=None,
+ sync_backref=None,
+ _legacy_inactive_history_style=False,
+ ):
+ """Provide a relationship between two mapped classes.
+
+ This corresponds to a parent-child or associative table relationship.
+ The constructed class is an instance of
+ :class:`.RelationshipProperty`.
+
+ A typical :func:`_orm.relationship`, used in a classical mapping::
+
+ mapper(Parent, properties={
+ 'children': relationship(Child)
+ })
+
+ Some arguments accepted by :func:`_orm.relationship`
+ optionally accept a
+ callable function, which when called produces the desired value.
+ The callable is invoked by the parent :class:`_orm.Mapper` at "mapper
+ initialization" time, which happens only when mappers are first used,
+ and is assumed to be after all mappings have been constructed. This
+ can be used to resolve order-of-declaration and other dependency
+ issues, such as if ``Child`` is declared below ``Parent`` in the same
+ file::
+
+ mapper(Parent, properties={
+ "children":relationship(lambda: Child,
+ order_by=lambda: Child.id)
+ })
+
+ When using the :ref:`declarative_toplevel` extension, the Declarative
+ initializer allows string arguments to be passed to
+ :func:`_orm.relationship`. These string arguments are converted into
+ callables that evaluate the string as Python code, using the
+ Declarative class-registry as a namespace. This allows the lookup of
+ related classes to be automatic via their string name, and removes the
+ need for related classes to be imported into the local module space
+ before the dependent classes have been declared. It is still required
+ that the modules in which these related classes appear are imported
+ anywhere in the application at some point before the related mappings
+ are actually used, else a lookup error will be raised when the
+ :func:`_orm.relationship`
+ attempts to resolve the string reference to the
+        related class.  An example of a string-resolved class is as
+ follows::
+
+ from sqlalchemy.ext.declarative import declarative_base
+
+ Base = declarative_base()
+
+ class Parent(Base):
+ __tablename__ = 'parent'
+ id = Column(Integer, primary_key=True)
+ children = relationship("Child", order_by="Child.id")
+
+ .. seealso::
+
+ :ref:`relationship_config_toplevel` - Full introductory and
+ reference documentation for :func:`_orm.relationship`.
+
+ :ref:`tutorial_orm_related_objects` - ORM tutorial introduction.
+
+ :param argument:
+ A mapped class, or actual :class:`_orm.Mapper` instance,
+ representing
+ the target of the relationship.
+
+ :paramref:`_orm.relationship.argument`
+ may also be passed as a callable
+ function which is evaluated at mapper initialization time, and may
+ be passed as a string name when using Declarative.
+
+ .. warning:: Prior to SQLAlchemy 1.3.16, this value is interpreted
+ using Python's ``eval()`` function.
+ **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
+ See :ref:`declarative_relationship_eval` for details on
+ declarative evaluation of :func:`_orm.relationship` arguments.
+
+        .. versionchanged:: 1.3.16
+
+ The string evaluation of the main "argument" no longer accepts an
+ open ended Python expression, instead only accepting a string
+ class name or dotted package-qualified name.
+
+ .. seealso::
+
+ :ref:`declarative_configuring_relationships` - further detail
+ on relationship configuration when using Declarative.
+
+ :param secondary:
+ For a many-to-many relationship, specifies the intermediary
+ table, and is typically an instance of :class:`_schema.Table`.
+ In less common circumstances, the argument may also be specified
+ as an :class:`_expression.Alias` construct, or even a
+ :class:`_expression.Join` construct.
+
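+          E.g., a minimal many-to-many sketch (table and class names
+          are illustrative assumptions)::
+
+              association_table = Table(
+                  'association', Base.metadata,
+                  Column('left_id', ForeignKey('left.id'), primary_key=True),
+                  Column('right_id', ForeignKey('right.id'), primary_key=True),
+              )
+
+              class Parent(Base):
+                  __tablename__ = 'left'
+                  id = Column(Integer, primary_key=True)
+                  children = relationship("Child", secondary=association_table)
+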
+ :paramref:`_orm.relationship.secondary` may
+ also be passed as a callable function which is evaluated at
+ mapper initialization time. When using Declarative, it may also
+ be a string argument noting the name of a :class:`_schema.Table`
+ that is
+ present in the :class:`_schema.MetaData`
+ collection associated with the
+ parent-mapped :class:`_schema.Table`.
+
+ .. warning:: When passed as a Python-evaluable string, the
+ argument is interpreted using Python's ``eval()`` function.
+ **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
+ See :ref:`declarative_relationship_eval` for details on
+ declarative evaluation of :func:`_orm.relationship` arguments.
+
+ The :paramref:`_orm.relationship.secondary` keyword argument is
+ typically applied in the case where the intermediary
+ :class:`_schema.Table`
+ is not otherwise expressed in any direct class mapping. If the
+ "secondary" table is also explicitly mapped elsewhere (e.g. as in
+ :ref:`association_pattern`), one should consider applying the
+ :paramref:`_orm.relationship.viewonly` flag so that this
+ :func:`_orm.relationship`
+ is not used for persistence operations which
+ may conflict with those of the association object pattern.
+
+ .. seealso::
+
+ :ref:`relationships_many_to_many` - Reference example of "many
+ to many".
+
+ :ref:`self_referential_many_to_many` - Specifics on using
+ many-to-many in a self-referential case.
+
+ :ref:`declarative_many_to_many` - Additional options when using
+ Declarative.
+
+ :ref:`association_pattern` - an alternative to
+ :paramref:`_orm.relationship.secondary`
+ when composing association
+ table relationships, allowing additional attributes to be
+ specified on the association table.
+
+ :ref:`composite_secondary_join` - a lesser-used pattern which
+ in some cases can enable complex :func:`_orm.relationship` SQL
+ conditions to be used.
+
+ .. versionadded:: 0.9.2 :paramref:`_orm.relationship.secondary`
+ works
+ more effectively when referring to a :class:`_expression.Join`
+ instance.
+
+ :param active_history=False:
+ When ``True``, indicates that the "previous" value for a
+ many-to-one reference should be loaded when replaced, if
+ not already loaded. Normally, history tracking logic for
+ simple many-to-ones only needs to be aware of the "new"
+ value in order to perform a flush. This flag is available
+ for applications that make use of
+ :func:`.attributes.get_history` which also need to know
+ the "previous" value of the attribute.
+
+ :param backref:
+ A reference to a string relationship name, or a :func:`_orm.backref`
+ construct, which will be used to automatically generate a new
+ :func:`_orm.relationship` on the related class, which then refers to
+ this one using a bi-directional
+ :paramref:`_orm.relationship.back_populates` configuration.
+
+ In modern Python, explicit use of :func:`_orm.relationship` with
+ :paramref:`_orm.relationship.back_populates` should be preferred, as
+ it is more robust in terms of mapper configuration as well as more
+ conceptually straightforward. It also integrates with new :pep:`484`
+ typing features introduced in SQLAlchemy 2.0 which is not possible
+ with dynamically generated attributes.
+
+ .. seealso::
+
+ :ref:`relationships_backref` - notes on using
+ :paramref:`_orm.relationship.backref`
+
+ :ref:`tutorial_orm_related_objects` - in the
+ :ref:`unified_tutorial`, presents an overview of bi-directional
+ relationship configuration and behaviors using
+ :paramref:`_orm.relationship.back_populates`
+
+ :func:`.backref` - allows control over :func:`_orm.relationship`
+ configuration when using :paramref:`_orm.relationship.backref`.
+
+
+ :param back_populates:
+ Indicates the name of a :func:`_orm.relationship` on the related
+ class that will be synchronized with this one. It is usually
+ expected that the :func:`_orm.relationship` on the related class
+ also refer to this one. This allows objects on both sides of
+ each :func:`_orm.relationship` to synchronize in-Python state
+ changes and also provides directives to the :term:`unit of work`
+ flush process how changes along these relationships should
+ be persisted.
+
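+          E.g., a minimal bi-directional sketch (``Parent`` / ``Child``
+          are assumed declarative classes)::
+
+              class Parent(Base):
+                  __tablename__ = 'parent'
+                  id = Column(Integer, primary_key=True)
+                  children = relationship("Child", back_populates="parent")
+
+              class Child(Base):
+                  __tablename__ = 'child'
+                  id = Column(Integer, primary_key=True)
+                  parent_id = Column(ForeignKey('parent.id'))
+                  parent = relationship("Parent", back_populates="children")
+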
+ .. seealso::
+
+ :ref:`tutorial_orm_related_objects` - in the
+ :ref:`unified_tutorial`, presents an overview of bi-directional
+ relationship configuration and behaviors.
+
+ :ref:`relationship_patterns` - includes many examples of
+ :paramref:`_orm.relationship.back_populates`.
+
+ :param overlaps:
+ A string name or comma-delimited set of names of other relationships
+ on either this mapper, a descendant mapper, or a target mapper with
+ which this relationship may write to the same foreign keys upon
+ persistence. The only effect this has is to eliminate the
+ warning that this relationship will conflict with another upon
+          persistence.  This is used for relationships that are truly
+ capable of conflicting with each other on write, but the application
+ will ensure that no such conflicts occur.
+
+ .. versionadded:: 1.4
+
+ .. seealso::
+
+ :ref:`error_qzyx` - usage example
+
+ :param bake_queries=True:
+ Legacy parameter, not used.
+
+ .. versionchanged:: 1.4.23 the "lambda caching" system is no longer
+ used by loader strategies and the ``bake_queries`` parameter
+ has no effect.
+
+ :param cascade:
+ A comma-separated list of cascade rules which determines how
+ Session operations should be "cascaded" from parent to child.
+ This defaults to ``False``, which means the default cascade
+ should be used - this default cascade is ``"save-update, merge"``.
+
+ The available cascades are ``save-update``, ``merge``,
+ ``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``.
+          An additional option, ``all``, indicates shorthand for
+ ``"save-update, merge, refresh-expire,
+ expunge, delete"``, and is often used as in ``"all, delete-orphan"``
+ to indicate that related objects should follow along with the
+ parent object in all cases, and be deleted when de-associated.
+
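+          E.g., a brief sketch::
+
+              children = relationship("Child", cascade="all, delete-orphan")
+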
+ .. seealso::
+
+ :ref:`unitofwork_cascades` - Full detail on each of the available
+ cascade options.
+
+ :param cascade_backrefs=True:
+ A boolean value indicating if the ``save-update`` cascade should
+ operate along an assignment event intercepted by a backref.
+ When set to ``False``, the attribute managed by this relationship
+ will not cascade an incoming transient object into the session of a
+ persistent parent, if the event is received via backref.
+
+ .. deprecated:: 1.4 The
+ :paramref:`_orm.relationship.cascade_backrefs`
+ flag will default to False in all cases in SQLAlchemy 2.0.
+
+ .. seealso::
+
+ :ref:`backref_cascade` - Full discussion and examples on how
+ the :paramref:`_orm.relationship.cascade_backrefs` option is used.
+
+ :param collection_class:
+          A class or callable that returns a new list-holding object, which
+          will be used in place of a plain list for storing elements.
+
+ .. seealso::
+
+ :ref:`custom_collections` - Introductory documentation and
+ examples.
+
+ :param comparator_factory:
+ A class which extends :class:`.RelationshipProperty.Comparator`
+ which provides custom SQL clause generation for comparison
+ operations.
+
+ .. seealso::
+
+ :class:`.PropComparator` - some detail on redefining comparators
+ at this level.
+
+ :ref:`custom_comparators` - Brief intro to this feature.
+
+
+ :param distinct_target_key=None:
+ Indicate if a "subquery" eager load should apply the DISTINCT
+ keyword to the innermost SELECT statement. When left as ``None``,
+ the DISTINCT keyword will be applied in those cases when the target
+ columns do not comprise the full primary key of the target table.
+ When set to ``True``, the DISTINCT keyword is applied to the
+ innermost SELECT unconditionally.
+
+ It may be desirable to set this flag to False when the DISTINCT is
+ reducing performance of the innermost subquery beyond that of what
+ duplicate innermost rows may be causing.
+
+ .. versionchanged:: 0.9.0 -
+ :paramref:`_orm.relationship.distinct_target_key` now defaults to
+ ``None``, so that the feature enables itself automatically for
+ those cases where the innermost query targets a non-unique
+ key.
+
+ .. seealso::
+
+ :ref:`loading_toplevel` - includes an introduction to subquery
+ eager loading.
+
+ :param doc:
+ Docstring which will be applied to the resulting descriptor.
+
+ :param foreign_keys:
+
+ A list of columns which are to be used as "foreign key"
+ columns, or columns which refer to the value in a remote
+ column, within the context of this :func:`_orm.relationship`
+ object's :paramref:`_orm.relationship.primaryjoin` condition.
+ That is, if the :paramref:`_orm.relationship.primaryjoin`
+ condition of this :func:`_orm.relationship` is ``a.id ==
+ b.a_id``, and the values in ``b.a_id`` are required to be
+ present in ``a.id``, then the "foreign key" column of this
+ :func:`_orm.relationship` is ``b.a_id``.
+
+ In normal cases, the :paramref:`_orm.relationship.foreign_keys`
+ parameter is **not required.** :func:`_orm.relationship` will
+ automatically determine which columns in the
+ :paramref:`_orm.relationship.primaryjoin` condition are to be
+ considered "foreign key" columns based on those
+ :class:`_schema.Column` objects that specify
+ :class:`_schema.ForeignKey`,
+ or are otherwise listed as referencing columns in a
+ :class:`_schema.ForeignKeyConstraint` construct.
+ :paramref:`_orm.relationship.foreign_keys` is only needed when:
+
+ 1. There is more than one way to construct a join from the local
+ table to the remote table, as there are multiple foreign key
+ references present. Setting ``foreign_keys`` will limit the
+ :func:`_orm.relationship`
+ to consider just those columns specified
+ here as "foreign".
+
+ 2. The :class:`_schema.Table` being mapped does not actually have
+ :class:`_schema.ForeignKey` or
+ :class:`_schema.ForeignKeyConstraint`
+ constructs present, often because the table
+ was reflected from a database that does not support foreign key
+ reflection (MySQL MyISAM).
+
+ 3. The :paramref:`_orm.relationship.primaryjoin`
+ argument is used to
+ construct a non-standard join condition, which makes use of
+ columns or expressions that do not normally refer to their
+ "parent" column, such as a join condition expressed by a
+ complex comparison using a SQL function.
+
+ The :func:`_orm.relationship` construct will raise informative
+ error messages that suggest the use of the
+ :paramref:`_orm.relationship.foreign_keys` parameter when
+ presented with an ambiguous condition. In typical cases,
+ if :func:`_orm.relationship` doesn't raise any exceptions, the
+ :paramref:`_orm.relationship.foreign_keys` parameter is usually
+ not needed.
+
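+          E.g., a sketch of case 1 above, where two foreign keys refer
+          to the same parent table (class and column names are
+          assumptions)::
+
+              class Address(Base):
+                  __tablename__ = 'address'
+                  id = Column(Integer, primary_key=True)
+                  user_id = Column(ForeignKey('user.id'))
+                  billing_user_id = Column(ForeignKey('user.id'))
+                  user = relationship("User", foreign_keys=[user_id])
+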
+ :paramref:`_orm.relationship.foreign_keys` may also be passed as a
+ callable function which is evaluated at mapper initialization time,
+ and may be passed as a Python-evaluable string when using
+ Declarative.
+
+ .. warning:: When passed as a Python-evaluable string, the
+ argument is interpreted using Python's ``eval()`` function.
+ **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
+ See :ref:`declarative_relationship_eval` for details on
+ declarative evaluation of :func:`_orm.relationship` arguments.
+
+ .. seealso::
+
+ :ref:`relationship_foreign_keys`
+
+ :ref:`relationship_custom_foreign`
+
+ :func:`.foreign` - allows direct annotation of the "foreign"
+ columns within a :paramref:`_orm.relationship.primaryjoin`
+ condition.
+
+ :param info: Optional data dictionary which will be populated into the
+ :attr:`.MapperProperty.info` attribute of this object.
+
+ :param innerjoin=False:
+ When ``True``, joined eager loads will use an inner join to join
+ against related tables instead of an outer join. The purpose
+ of this option is generally one of performance, as inner joins
+ generally perform better than outer joins.
+
+ This flag can be set to ``True`` when the relationship references an
+ object via many-to-one using local foreign keys that are not
+ nullable, or when the reference is one-to-one or a collection that
+          is guaranteed to have at least one entry.
+
+ The option supports the same "nested" and "unnested" options as
+ that of :paramref:`_orm.joinedload.innerjoin`. See that flag
+ for details on nested / unnested behaviors.
+
+ .. seealso::
+
+ :paramref:`_orm.joinedload.innerjoin` - the option as specified by
+ loader option, including detail on nesting behavior.
+
+ :ref:`what_kind_of_loading` - Discussion of some details of
+ various loader options.
+
+
+ :param join_depth:
+ When non-``None``, an integer value indicating how many levels
+ deep "eager" loaders should join on a self-referring or cyclical
+ relationship. The number counts how many times the same Mapper
+ shall be present in the loading condition along a particular join
+ branch. When left at its default of ``None``, eager loaders
+          will stop chaining when they encounter the same target mapper
+ which is already higher up in the chain. This option applies
+ both to joined- and subquery- eager loaders.
+
+ .. seealso::
+
+ :ref:`self_referential_eager_loading` - Introductory documentation
+ and examples.
+
+        :param lazy='select': specifies
+          how the related items should be loaded.  Default value is
+          ``select``.  Values include the following (a brief usage
+          sketch follows this list):
+
+ * ``select`` - items should be loaded lazily when the property is
+ first accessed, using a separate SELECT statement, or identity map
+ fetch for simple many-to-one references.
+
+ * ``immediate`` - items should be loaded as the parents are loaded,
+ using a separate SELECT statement, or identity map fetch for
+ simple many-to-one references.
+
+ * ``joined`` - items should be loaded "eagerly" in the same query as
+ that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
+ the join is "outer" or not is determined by the
+ :paramref:`_orm.relationship.innerjoin` parameter.
+
+ * ``subquery`` - items should be loaded "eagerly" as the parents are
+ loaded, using one additional SQL statement, which issues a JOIN to
+ a subquery of the original statement, for each collection
+ requested.
+
+ * ``selectin`` - items should be loaded "eagerly" as the parents
+ are loaded, using one or more additional SQL statements, which
+ issues a JOIN to the immediate parent object, specifying primary
+ key identifiers using an IN clause.
+
+ .. versionadded:: 1.2
+
+ * ``noload`` - no loading should occur at any time. This is to
+ support "write-only" attributes, or attributes which are
+ populated in some manner specific to the application.
+
+ * ``raise`` - lazy loading is disallowed; accessing
+ the attribute, if its value were not already loaded via eager
+ loading, will raise an :exc:`~sqlalchemy.exc.InvalidRequestError`.
+ This strategy can be used when objects are to be detached from
+ their attached :class:`.Session` after they are loaded.
+
+ .. versionadded:: 1.1
+
+ * ``raise_on_sql`` - lazy loading that emits SQL is disallowed;
+ accessing the attribute, if its value were not already loaded via
+ eager loading, will raise an
+ :exc:`~sqlalchemy.exc.InvalidRequestError`, **if the lazy load
+ needs to emit SQL**. If the lazy load can pull the related value
+ from the identity map or determine that it should be None, the
+ value is loaded. This strategy can be used when objects will
+            remain associated with the attached :class:`.Session`, but
+            additional SELECT statements should be blocked.
+
+ .. versionadded:: 1.1
+
+ * ``dynamic`` - the attribute will return a pre-configured
+ :class:`_query.Query` object for all read
+ operations, onto which further filtering operations can be
+ applied before iterating the results. See
+ the section :ref:`dynamic_relationship` for more details.
+
+ * True - a synonym for 'select'
+
+ * False - a synonym for 'joined'
+
+ * None - a synonym for 'noload'
+
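+          As the sketch referenced above (``Child`` is an assumed
+          related class)::
+
+              # eager load children via JOIN
+              children = relationship("Child", lazy="joined")
+
+              # disallow lazy loading entirely
+              children = relationship("Child", lazy="raise")
+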
+ .. seealso::
+
+ :doc:`/orm/loading_relationships` - Full documentation on
+ relationship loader configuration.
+
+ :ref:`dynamic_relationship` - detail on the ``dynamic`` option.
+
+ :ref:`collections_noload_raiseload` - notes on "noload" and "raise"
+
+ :param load_on_pending=False:
+ Indicates loading behavior for transient or pending parent objects.
+
+ When set to ``True``, causes the lazy-loader to
+ issue a query for a parent object that is not persistent, meaning it
+ has never been flushed. This may take effect for a pending object
+ when autoflush is disabled, or for a transient object that has been
+ "attached" to a :class:`.Session` but is not part of its pending
+ collection.
+
+ The :paramref:`_orm.relationship.load_on_pending`
+ flag does not improve
+ behavior when the ORM is used normally - object references should be
+ constructed at the object level, not at the foreign key level, so
+ that they are present in an ordinary way before a flush proceeds.
+          This flag is not intended for general use.
+
+ .. seealso::
+
+ :meth:`.Session.enable_relationship_loading` - this method
+ establishes "load on pending" behavior for the whole object, and
+ also allows loading on objects that remain transient or
+ detached.
+
+ :param order_by:
+ Indicates the ordering that should be applied when loading these
+ items. :paramref:`_orm.relationship.order_by`
+ is expected to refer to
+ one of the :class:`_schema.Column`
+ objects to which the target class is
+ mapped, or the attribute itself bound to the target class which
+ refers to the column.
+
+ :paramref:`_orm.relationship.order_by`
+ may also be passed as a callable
+ function which is evaluated at mapper initialization time, and may
+ be passed as a Python-evaluable string when using Declarative.
+
+ .. warning:: When passed as a Python-evaluable string, the
+ argument is interpreted using Python's ``eval()`` function.
+ **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
+ See :ref:`declarative_relationship_eval` for details on
+ declarative evaluation of :func:`_orm.relationship` arguments.
+
+ :param passive_deletes=False:
+ Indicates loading behavior during delete operations.
+
+ A value of True indicates that unloaded child items should not
+ be loaded during a delete operation on the parent. Normally,
+ when a parent item is deleted, all child items are loaded so
+ that they can either be marked as deleted, or have their
+ foreign key to the parent set to NULL. Marking this flag as
+ True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
+ place which will handle updating/deleting child rows on the
+ database side.
+
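+          E.g., a sketch assuming an ``ON DELETE CASCADE`` rule is
+          present on the child table's foreign key::
+
+              children = relationship("Child", passive_deletes=True)
+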
+ Additionally, setting the flag to the string value 'all' will
+ disable the "nulling out" of the child foreign keys, when the parent
+ object is deleted and there is no delete or delete-orphan cascade
+ enabled. This is typically used when a triggering or error raise
+ scenario is in place on the database side. Note that the foreign
+ key attributes on in-session child objects will not be changed after
+ a flush occurs so this is a very special use-case setting.
+ Additionally, the "nulling out" will still occur if the child
+ object is de-associated with the parent.
+
+ .. seealso::
+
+ :ref:`passive_deletes` - Introductory documentation
+ and examples.
+
+ :param passive_updates=True:
+ Indicates the persistence behavior to take when a referenced
+ primary key value changes in place, indicating that the referencing
+ foreign key columns will also need their value changed.
+
+ When True, it is assumed that ``ON UPDATE CASCADE`` is configured on
+ the foreign key in the database, and that the database will
+ handle propagation of an UPDATE from a source column to
+ dependent rows. When False, the SQLAlchemy
+ :func:`_orm.relationship`
+ construct will attempt to emit its own UPDATE statements to
+ modify related targets. However note that SQLAlchemy **cannot**
+ emit an UPDATE for more than one level of cascade. Also,
+          setting this flag to False is not compatible with a database
+          that is actively enforcing referential integrity, unless those
+          constraints are explicitly "deferred" (where the target backend
+          supports deferred constraints).
+
+ It is highly advised that an application which is employing
+ mutable primary keys keeps ``passive_updates`` set to True,
+ and instead uses the referential integrity features of the database
+ itself in order to handle the change efficiently and fully.
+
+ .. seealso::
+
+ :ref:`passive_updates` - Introductory documentation and
+ examples.
+
+ :paramref:`.mapper.passive_updates` - a similar flag which
+ takes effect for joined-table inheritance mappings.
+
+ :param post_update:
+ This indicates that the relationship should be handled by a
+ second UPDATE statement after an INSERT or before a
+          DELETE.  Currently, it will also issue an UPDATE after the
+          instance is UPDATEd, although this technically should
+ be improved. This flag is used to handle saving bi-directional
+ dependencies between two individual rows (i.e. each row
+ references the other), where it would otherwise be impossible to
+ INSERT or DELETE both rows fully since one row exists before the
+ other. Use this flag when a particular mapping arrangement will
+ incur two rows that are dependent on each other, such as a table
+ that has a one-to-many relationship to a set of child rows, and
+ also has a column that references a single child row within that
+ list (i.e. both tables contain a foreign key to each other). If
+ a flush operation returns an error that a "cyclical
+ dependency" was detected, this is a cue that you might want to
+ use :paramref:`_orm.relationship.post_update` to "break" the cycle.
+
+ .. seealso::
+
+ :ref:`post_update` - Introductory documentation and examples.
+
+ :param primaryjoin:
+ A SQL expression that will be used as the primary
+ join of the child object against the parent object, or in a
+ many-to-many relationship the join of the parent object to the
+ association table. By default, this value is computed based on the
+ foreign key relationships of the parent and child tables (or
+ association table).
+
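+          E.g., a sketch of a custom condition, using the string form
+          (class and column names are illustrative)::
+
+              boston_addresses = relationship(
+                  "Address",
+                  primaryjoin="and_(User.id == Address.user_id, "
+                  "Address.city == 'Boston')",
+              )
+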
+ :paramref:`_orm.relationship.primaryjoin` may also be passed as a
+ callable function which is evaluated at mapper initialization time,
+ and may be passed as a Python-evaluable string when using
+ Declarative.
+
+ .. warning:: When passed as a Python-evaluable string, the
+ argument is interpreted using Python's ``eval()`` function.
+ **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
+ See :ref:`declarative_relationship_eval` for details on
+ declarative evaluation of :func:`_orm.relationship` arguments.
+
+ .. seealso::
+
+ :ref:`relationship_primaryjoin`
+
+ :param remote_side:
+ Used for self-referential relationships, indicates the column or
+ list of columns that form the "remote side" of the relationship.
+
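+          E.g., a self-referential sketch where ``remote_side``
+          indicates the many-to-one "parent" side (``Node`` is an
+          assumed class)::
+
+              class Node(Base):
+                  __tablename__ = 'node'
+                  id = Column(Integer, primary_key=True)
+                  parent_id = Column(ForeignKey('node.id'))
+                  parent = relationship("Node", remote_side=[id])
+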
+ :paramref:`_orm.relationship.remote_side` may also be passed as a
+ callable function which is evaluated at mapper initialization time,
+ and may be passed as a Python-evaluable string when using
+ Declarative.
+
+ .. warning:: When passed as a Python-evaluable string, the
+ argument is interpreted using Python's ``eval()`` function.
+ **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
+ See :ref:`declarative_relationship_eval` for details on
+ declarative evaluation of :func:`_orm.relationship` arguments.
+
+ .. seealso::
+
+ :ref:`self_referential` - in-depth explanation of how
+ :paramref:`_orm.relationship.remote_side`
+ is used to configure self-referential relationships.
+
+ :func:`.remote` - an annotation function that accomplishes the
+ same purpose as :paramref:`_orm.relationship.remote_side`,
+ typically
+ when a custom :paramref:`_orm.relationship.primaryjoin` condition
+ is used.
+
+ :param query_class:
+ A :class:`_query.Query`
+ subclass that will be used internally by the
+ ``AppenderQuery`` returned by a "dynamic" relationship, that
+ is, a relationship that specifies ``lazy="dynamic"`` or was
+ otherwise constructed using the :func:`_orm.dynamic_loader`
+ function.
+
+ .. seealso::
+
+ :ref:`dynamic_relationship` - Introduction to "dynamic"
+ relationship loaders.
+
+ :param secondaryjoin:
+ A SQL expression that will be used as the join of
+ an association table to the child object. By default, this value is
+ computed based on the foreign key relationships of the association
+ and child tables.
+
+ :paramref:`_orm.relationship.secondaryjoin` may also be passed as a
+ callable function which is evaluated at mapper initialization time,
+ and may be passed as a Python-evaluable string when using
+ Declarative.
+
+ .. warning:: When passed as a Python-evaluable string, the
+ argument is interpreted using Python's ``eval()`` function.
+ **DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
+ See :ref:`declarative_relationship_eval` for details on
+ declarative evaluation of :func:`_orm.relationship` arguments.
+
+ .. seealso::
+
+ :ref:`relationship_primaryjoin`
+
+ :param single_parent:
+ When True, installs a validator which will prevent objects
+ from being associated with more than one parent at a time.
+ This is used for many-to-one or many-to-many relationships that
+ should be treated either as one-to-one or one-to-many. Its usage
+ is optional, except for :func:`_orm.relationship` constructs which
+ are many-to-one or many-to-many and also
+ specify the ``delete-orphan`` cascade option. The
+          :func:`_orm.relationship` construct itself will raise an
+          informative error when this option is required.
+
+ .. seealso::
+
+ :ref:`unitofwork_cascades` - includes detail on when the
+ :paramref:`_orm.relationship.single_parent`
+ flag may be appropriate.
+
+ :param uselist:
+ A boolean that indicates if this property should be loaded as a
+ list or a scalar. In most cases, this value is determined
+ automatically by :func:`_orm.relationship` at mapper configuration
+ time, based on the type and direction
+ of the relationship - one to many forms a list, many to one
+ forms a scalar, many to many is a list. If a scalar is desired
+ where normally a list would be present, such as a bi-directional
+ one-to-one relationship, set :paramref:`_orm.relationship.uselist`
+ to
+ False.
+
+ The :paramref:`_orm.relationship.uselist`
+ flag is also available on an
+ existing :func:`_orm.relationship`
+ construct as a read-only attribute,
+ which can be used to determine if this :func:`_orm.relationship`
+ deals
+ with collections or scalar attributes::
+
+ >>> User.addresses.property.uselist
+ True
+
+ .. seealso::
+
+ :ref:`relationships_one_to_one` - Introduction to the "one to
+ one" relationship pattern, which is typically when the
+ :paramref:`_orm.relationship.uselist` flag is needed.
+
+ :param viewonly=False:
+ When set to ``True``, the relationship is used only for loading
+ objects, and not for any persistence operation. A
+ :func:`_orm.relationship` which specifies
+ :paramref:`_orm.relationship.viewonly` can work
+ with a wider range of SQL operations within the
+ :paramref:`_orm.relationship.primaryjoin` condition, including
+ operations that feature the use of a variety of comparison operators
+ as well as SQL functions such as :func:`_expression.cast`. The
+ :paramref:`_orm.relationship.viewonly`
+ flag is also of general use when defining any kind of
+ :func:`_orm.relationship` that doesn't represent
+ the full set of related objects, to prevent modifications of the
+ collection from resulting in persistence operations.
+
+ When using the :paramref:`_orm.relationship.viewonly` flag in
+ conjunction with backrefs, the originating relationship for a
+ particular state change will not produce state changes within the
+ viewonly relationship. This is the behavior implied by
+ :paramref:`_orm.relationship.sync_backref` being set to False.
+
+ .. versionchanged:: 1.3.17 - the
+ :paramref:`_orm.relationship.sync_backref` flag is set to False
+ when using viewonly in conjunction with backrefs.
+
+ .. seealso::
+
+ :paramref:`_orm.relationship.sync_backref`
+
+ :param sync_backref:
+ A boolean that enables the events used to synchronize the in-Python
+ attributes when this relationship is target of either
+ :paramref:`_orm.relationship.backref` or
+ :paramref:`_orm.relationship.back_populates`.
+
+ Defaults to ``None``, which indicates that an automatic value should
+ be selected based on the value of the
+ :paramref:`_orm.relationship.viewonly` flag. When left at its
+ default, changes in state will be back-populated only if neither
+          side of a relationship is viewonly.
+
+ .. versionadded:: 1.3.17
+
+ .. versionchanged:: 1.4 - A relationship that specifies
+ :paramref:`_orm.relationship.viewonly` automatically implies
+ that :paramref:`_orm.relationship.sync_backref` is ``False``.
+
+ .. seealso::
+
+ :paramref:`_orm.relationship.viewonly`
+
+ :param omit_join:
+ Allows manual control over the "selectin" automatic join
+ optimization. Set to ``False`` to disable the "omit join" feature
+ added in SQLAlchemy 1.3; or leave as ``None`` to leave automatic
+ optimization in place.
+
+ .. note:: This flag may only be set to ``False``. It is not
+ necessary to set it to ``True`` as the "omit_join" optimization is
+ automatically detected; if it is not detected, then the
+ optimization is not supported.
+
+ .. versionchanged:: 1.3.11 setting ``omit_join`` to True will now
+ emit a warning as this was not the intended use of this flag.
+
+ .. versionadded:: 1.3
+
+
+ """
+ super(RelationshipProperty, self).__init__()
+
+ self.uselist = uselist
+ self.argument = argument
+ self.secondary = secondary
+ self.primaryjoin = primaryjoin
+ self.secondaryjoin = secondaryjoin
+ self.post_update = post_update
+ self.direction = None
+ self.viewonly = viewonly
+ if viewonly:
+ self._warn_for_persistence_only_flags(
+ passive_deletes=passive_deletes,
+ passive_updates=passive_updates,
+ enable_typechecks=enable_typechecks,
+ active_history=active_history,
+ cascade_backrefs=cascade_backrefs,
+ )
+ if viewonly and sync_backref:
+ raise sa_exc.ArgumentError(
+ "sync_backref and viewonly cannot both be True"
+ )
+ self.sync_backref = sync_backref
+ self.lazy = lazy
+ self.single_parent = single_parent
+ self._user_defined_foreign_keys = foreign_keys
+ self.collection_class = collection_class
+ self.passive_deletes = passive_deletes
+ self.cascade_backrefs = cascade_backrefs
+ self.passive_updates = passive_updates
+ self.remote_side = remote_side
+ self.enable_typechecks = enable_typechecks
+ self.query_class = query_class
+ self.innerjoin = innerjoin
+ self.distinct_target_key = distinct_target_key
+ self.doc = doc
+ self.active_history = active_history
+ self._legacy_inactive_history_style = _legacy_inactive_history_style
+
+ self.join_depth = join_depth
+ if omit_join:
+ util.warn(
+ "setting omit_join to True is not supported; selectin "
+ "loading of this relationship may not work correctly if this "
+ "flag is set explicitly. omit_join optimization is "
+ "automatically detected for conditions under which it is "
+ "supported."
+ )
+
+ self.omit_join = omit_join
+ self.local_remote_pairs = _local_remote_pairs
+ self.bake_queries = bake_queries
+ self.load_on_pending = load_on_pending
+ self.comparator_factory = (
+ comparator_factory or RelationshipProperty.Comparator
+ )
+ self.comparator = self.comparator_factory(self, None)
+ util.set_creation_order(self)
+
+ if info is not None:
+ self.info = info
+
+ self.strategy_key = (("lazy", self.lazy),)
+
+ self._reverse_property = set()
+ if overlaps:
+ self._overlaps = set(re.split(r"\s*,\s*", overlaps))
+ else:
+ self._overlaps = ()
+
+ if cascade is not False:
+ self.cascade = cascade
+ elif self.viewonly:
+ self.cascade = "none"
+ else:
+ self.cascade = "save-update, merge"
+
+ self.order_by = order_by
+
+ self.back_populates = back_populates
+
+ if self.back_populates:
+ if backref:
+ raise sa_exc.ArgumentError(
+ "backref and back_populates keyword arguments "
+ "are mutually exclusive"
+ )
+ self.backref = None
+ else:
+ self.backref = backref
+
+ def _warn_for_persistence_only_flags(self, **kw):
+ for k, v in kw.items():
+ if v != self._persistence_only[k]:
+ # we are warning here rather than warn deprecated as this is a
+ # configuration mistake, and Python shows regular warnings more
+ # aggressively than deprecation warnings by default. Unlike the
+ # case of setting viewonly with cascade, the settings being
+ # warned about here are not actively doing the wrong thing
+ # against viewonly=True, so it is not as urgent to have these
+ # raise an error.
+ util.warn(
+ "Setting %s on relationship() while also "
+ "setting viewonly=True does not make sense, as a "
+ "viewonly=True relationship does not perform persistence "
+ "operations. This configuration may raise an error "
+ "in a future release." % (k,)
+ )
+
+ def instrument_class(self, mapper):
+ attributes.register_descriptor(
+ mapper.class_,
+ self.key,
+ comparator=self.comparator_factory(self, mapper),
+ parententity=mapper,
+ doc=self.doc,
+ )
+
+ class Comparator(PropComparator):
+ """Produce boolean, comparison, and other operators for
+ :class:`.RelationshipProperty` attributes.
+
+ See the documentation for :class:`.PropComparator` for a brief
+ overview of ORM level operator definition.
+
+ .. seealso::
+
+ :class:`.PropComparator`
+
+ :class:`.ColumnProperty.Comparator`
+
+ :class:`.ColumnOperators`
+
+ :ref:`types_operators`
+
+ :attr:`.TypeEngine.comparator_factory`
+
+ """
+
+ _of_type = None
+ _extra_criteria = ()
+
+ def __init__(
+ self,
+ prop,
+ parentmapper,
+ adapt_to_entity=None,
+ of_type=None,
+ extra_criteria=(),
+ ):
+ """Construction of :class:`.RelationshipProperty.Comparator`
+ is internal to the ORM's attribute mechanics.
+
+ """
+ self.prop = prop
+ self._parententity = parentmapper
+ self._adapt_to_entity = adapt_to_entity
+ if of_type:
+ self._of_type = of_type
+ self._extra_criteria = extra_criteria
+
+ def adapt_to_entity(self, adapt_to_entity):
+ return self.__class__(
+ self.property,
+ self._parententity,
+ adapt_to_entity=adapt_to_entity,
+ of_type=self._of_type,
+ )
+
+ @util.memoized_property
+ def entity(self):
+ """The target entity referred to by this
+ :class:`.RelationshipProperty.Comparator`.
+
+ This is either a :class:`_orm.Mapper` or :class:`.AliasedInsp`
+ object.
+
+ This is the "target" or "remote" side of the
+ :func:`_orm.relationship`.
+
+ """
+ # this is a relatively recent change made for
+ # 1.4.27 as part of #7244.
+ # TODO: shouldn't _of_type be inspected up front when received?
+ if self._of_type is not None:
+ return inspect(self._of_type)
+ else:
+ return self.property.entity
+
+ @util.memoized_property
+ def mapper(self):
+ """The target :class:`_orm.Mapper` referred to by this
+ :class:`.RelationshipProperty.Comparator`.
+
+ This is the "target" or "remote" side of the
+ :func:`_orm.relationship`.
+
+ """
+ return self.property.mapper
+
+ @util.memoized_property
+ def _parententity(self):
+ return self.property.parent
+
+ def _source_selectable(self):
+ if self._adapt_to_entity:
+ return self._adapt_to_entity.selectable
+ else:
+ return self.property.parent._with_polymorphic_selectable
+
+ def __clause_element__(self):
+ adapt_from = self._source_selectable()
+ if self._of_type:
+ of_type_entity = inspect(self._of_type)
+ else:
+ of_type_entity = None
+
+ (
+ pj,
+ sj,
+ source,
+ dest,
+ secondary,
+ target_adapter,
+ ) = self.property._create_joins(
+ source_selectable=adapt_from,
+ source_polymorphic=True,
+ of_type_entity=of_type_entity,
+ alias_secondary=True,
+ extra_criteria=self._extra_criteria,
+ )
+ if sj is not None:
+ return pj & sj
+ else:
+ return pj
+
+ def of_type(self, cls):
+ r"""Redefine this object in terms of a polymorphic subclass.
+
+ See :meth:`.PropComparator.of_type` for an example.
+
+
+ """
+ return RelationshipProperty.Comparator(
+ self.property,
+ self._parententity,
+ adapt_to_entity=self._adapt_to_entity,
+ of_type=cls,
+ extra_criteria=self._extra_criteria,
+ )
+
+ def and_(self, *other):
+ """Add AND criteria.
+
+ See :meth:`.PropComparator.and_` for an example.
+
+ .. versionadded:: 1.4
+
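+ E.g., a sketch using hypothetical ``User`` / ``Address``
+ mappings::
+
+     stmt = select(User).join(
+         User.addresses.and_(Address.email_address != None)
+     )
+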
+ """
+ return RelationshipProperty.Comparator(
+ self.property,
+ self._parententity,
+ adapt_to_entity=self._adapt_to_entity,
+ of_type=self._of_type,
+ extra_criteria=self._extra_criteria + other,
+ )
+
+ def in_(self, other):
+ """Produce an IN clause - this is not implemented
+ for :func:`_orm.relationship`-based attributes at this time.
+
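+ As the message below suggests, a simple many-to-one may be
+ expressed against the foreign key column directly instead,
+ e.g. (a sketch; names illustrative)::
+
+     session.query(Address).filter(Address.user_id.in_([1, 2, 3]))
+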
+ """
+ raise NotImplementedError(
+ "in_() not yet supported for "
+ "relationships. For a simple "
+ "many-to-one, use in_() against "
+ "the set of foreign key values."
+ )
+
+ __hash__ = None
+
+ def __eq__(self, other):
+ """Implement the ``==`` operator.
+
+ In a many-to-one context, such as::
+
+ MyClass.some_prop == <some object>
+
+ this will typically produce a
+ clause such as::
+
+ mytable.related_id == <some id>
+
+ Where ``<some id>`` is the primary key of the given
+ object.
+
+ The ``==`` operator provides partial functionality for
+ non-many-to-one comparisons:
+
+ * Comparisons against collections are not supported.
+ Use :meth:`~.RelationshipProperty.Comparator.contains`.
+ * When compared to a scalar one-to-many, this will produce a
+ clause that compares the target columns in the parent to
+ the given target.
+ * Compared to a scalar many-to-many, an alias
+ of the association table will be rendered as
+ well, forming a natural join that is part of the
+ main body of the query. This will not work for
+ queries that go beyond simple AND conjunctions of
+ comparisons, such as those which use OR. Use
+ explicit joins, outerjoins, or
+ :meth:`~.RelationshipProperty.Comparator.has` for
+ more comprehensive non-many-to-one scalar
+ membership tests.
+ * Comparisons against ``None`` given in a one-to-many
+ or many-to-many context produce a NOT EXISTS clause.
+
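+ E.g., a one-to-many comparison to ``None`` (a sketch; names
+ illustrative)::
+
+     # renders NOT EXISTS (SELECT 1 FROM address WHERE ...)
+     session.query(User).filter(User.addresses == None)
+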
+ """
+ if isinstance(other, (util.NoneType, expression.Null)):
+ if self.property.direction in [ONETOMANY, MANYTOMANY]:
+ return ~self._criterion_exists()
+ else:
+ return _orm_annotate(
+ self.property._optimized_compare(
+ None, adapt_source=self.adapter
+ )
+ )
+ elif self.property.uselist:
+ raise sa_exc.InvalidRequestError(
+ "Can't compare a collection to an object or collection; "
+ "use contains() to test for membership."
+ )
+ else:
+ return _orm_annotate(
+ self.property._optimized_compare(
+ other, adapt_source=self.adapter
+ )
+ )
+
+ def _criterion_exists(self, criterion=None, **kwargs):
+ if getattr(self, "_of_type", None):
+ info = inspect(self._of_type)
+ target_mapper, to_selectable, is_aliased_class = (
+ info.mapper,
+ info.selectable,
+ info.is_aliased_class,
+ )
+ if self.property._is_self_referential and not is_aliased_class:
+ to_selectable = to_selectable._anonymous_fromclause()
+
+ single_crit = target_mapper._single_table_criterion
+ if single_crit is not None:
+ if criterion is not None:
+ criterion = single_crit & criterion
+ else:
+ criterion = single_crit
+ else:
+ is_aliased_class = False
+ to_selectable = None
+
+ if self.adapter:
+ source_selectable = self._source_selectable()
+ else:
+ source_selectable = None
+
+ (
+ pj,
+ sj,
+ source,
+ dest,
+ secondary,
+ target_adapter,
+ ) = self.property._create_joins(
+ dest_selectable=to_selectable,
+ source_selectable=source_selectable,
+ )
+
+ for k in kwargs:
+ crit = getattr(self.property.mapper.class_, k) == kwargs[k]
+ if criterion is None:
+ criterion = crit
+ else:
+ criterion = criterion & crit
+
+ # annotate the *local* side of the join condition, in the case
+ # of pj + sj this is the full primaryjoin, in the case of just
+ # pj it's the local side of the primaryjoin.
+ if sj is not None:
+ j = _orm_annotate(pj) & sj
+ else:
+ j = _orm_annotate(pj, exclude=self.property.remote_side)
+
+ if (
+ criterion is not None
+ and target_adapter
+ and not is_aliased_class
+ ):
+ # limit this adapter to annotated only?
+ criterion = target_adapter.traverse(criterion)
+
+ # only have the "joined left side" of what we
+ # return be subject to Query adaption. The right
+ # side of it is used for an exists() subquery and
+ # should not correlate or otherwise reach out
+ # to anything in the enclosing query.
+ if criterion is not None:
+ criterion = criterion._annotate(
+ {"no_replacement_traverse": True}
+ )
+
+ crit = j & sql.True_._ifnone(criterion)
+
+ if secondary is not None:
+ ex = (
+ sql.exists(1)
+ .where(crit)
+ .select_from(dest, secondary)
+ .correlate_except(dest, secondary)
+ )
+ else:
+ ex = (
+ sql.exists(1)
+ .where(crit)
+ .select_from(dest)
+ .correlate_except(dest)
+ )
+ return ex
+
+ def any(self, criterion=None, **kwargs):
+ """Produce an expression that tests a collection against
+ particular criterion, using EXISTS.
+
+ An expression like::
+
+ session.query(MyClass).filter(
+ MyClass.somereference.any(SomeRelated.x==2)
+ )
+
+
+ Will produce a query like::
+
+ SELECT * FROM my_table WHERE
+ EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
+ AND related.x=2)
+
+ Because :meth:`~.RelationshipProperty.Comparator.any` uses
+ a correlated subquery, its performance against large target
+ tables is not nearly as good as that of a simple join.
+
+ :meth:`~.RelationshipProperty.Comparator.any` is particularly
+ useful for testing for empty collections::
+
+ session.query(MyClass).filter(
+ ~MyClass.somereference.any()
+ )
+
+ will produce::
+
+ SELECT * FROM my_table WHERE
+ NOT (EXISTS (SELECT 1 FROM related WHERE
+ related.my_id=my_table.id))
+
+ :meth:`~.RelationshipProperty.Comparator.any` is only
+ valid for collections, i.e. a :func:`_orm.relationship`
+ that has ``uselist=True``. For scalar references,
+ use :meth:`~.RelationshipProperty.Comparator.has`.
+
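+ Keyword arguments passed to
+ :meth:`~.RelationshipProperty.Comparator.any` (and to
+ :meth:`~.RelationshipProperty.Comparator.has`) are interpreted
+ as equality comparisons against attributes of the related
+ class, so the first example may also be written as::
+
+     session.query(MyClass).filter(MyClass.somereference.any(x=2))
+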
+ """
+ if not self.property.uselist:
+ raise sa_exc.InvalidRequestError(
+ "'any()' not implemented for scalar "
+ "attributes. Use has()."
+ )
+
+ return self._criterion_exists(criterion, **kwargs)
+
+ def has(self, criterion=None, **kwargs):
+ """Produce an expression that tests a scalar reference against
+ particular criterion, using EXISTS.
+
+ An expression like::
+
+ session.query(MyClass).filter(
+ MyClass.somereference.has(SomeRelated.x==2)
+ )
+
+
+ Will produce a query like::
+
+ SELECT * FROM my_table WHERE
+ EXISTS (SELECT 1 FROM related WHERE
+ related.id==my_table.related_id AND related.x=2)
+
+ Because :meth:`~.RelationshipProperty.Comparator.has` uses
+ a correlated subquery, its performance against large target
+ tables is not nearly as good as that of a simple join.
+
+ :meth:`~.RelationshipProperty.Comparator.has` is only
+ valid for scalar references, i.e. a :func:`_orm.relationship`
+ that has ``uselist=False``. For collection references,
+ use :meth:`~.RelationshipProperty.Comparator.any`.
+
+ """
+ if self.property.uselist:
+ raise sa_exc.InvalidRequestError(
+ "'has()' not implemented for collections. " "Use any()."
+ )
+ return self._criterion_exists(criterion, **kwargs)
+
+ def contains(self, other, **kwargs):
+ """Return a simple expression that tests a collection for
+ containment of a particular item.
+
+ :meth:`~.RelationshipProperty.Comparator.contains` is
+ only valid for a collection, i.e. a
+ :func:`_orm.relationship` that implements
+ one-to-many or many-to-many with ``uselist=True``.
+
+ When used in a simple one-to-many context, an
+ expression like::
+
+ MyClass.somereference.contains(other)
+
+ Produces a clause like::
+
+ mytable.id == <some id>
+
+ Where ``<some id>`` is the value of the foreign key
+ attribute on ``other`` which refers to the primary
+ key of its parent object. From this it follows that
+ :meth:`~.RelationshipProperty.Comparator.contains` is
+ very useful when used with simple one-to-many
+ operations.
+
+ For many-to-many operations, the behavior of
+ :meth:`~.RelationshipProperty.Comparator.contains`
+ has more caveats. The association table will be
+ rendered in the statement, producing an "implicit"
+ join, that is, includes multiple tables in the FROM
+ clause which are equated in the WHERE clause::
+
+ query(MyClass).filter(MyClass.somereference.contains(other))
+
+ Produces a query like::
+
+ SELECT * FROM my_table, my_association_table AS
+ my_association_table_1 WHERE
+ my_table.id = my_association_table_1.parent_id
+ AND my_association_table_1.child_id = <some id>
+
+ Where ``<some id>`` would be the primary key of
+ ``other``. From the above, it is clear that
+ :meth:`~.RelationshipProperty.Comparator.contains`
+ will **not** work with many-to-many collections when
+ used in queries that move beyond simple AND
+ conjunctions, such as multiple
+ :meth:`~.RelationshipProperty.Comparator.contains`
+ expressions joined by OR. In such cases subqueries or
+ explicit "outer joins" will need to be used instead.
+ See :meth:`~.RelationshipProperty.Comparator.any` for
+ a less-performant alternative using EXISTS, or refer
+ to :meth:`_query.Query.outerjoin`
+ as well as :ref:`orm_queryguide_joins`
+ for more details on constructing outer joins.
+
+ kwargs may be ignored by this operator but are required for API
+ conformance.
+ """
+ if not self.property.uselist:
+ raise sa_exc.InvalidRequestError(
+ "'contains' not implemented for scalar "
+ "attributes. Use =="
+ )
+ clause = self.property._optimized_compare(
+ other, adapt_source=self.adapter
+ )
+
+ if self.property.secondaryjoin is not None:
+ clause.negation_clause = self.__negated_contains_or_equals(
+ other
+ )
+
+ return clause
+
+ def __negated_contains_or_equals(self, other):
+ if self.property.direction == MANYTOONE:
+ state = attributes.instance_state(other)
+
+ def state_bindparam(local_col, state, remote_col):
+ dict_ = state.dict
+ return sql.bindparam(
+ local_col.key,
+ type_=local_col.type,
+ unique=True,
+ callable_=self.property._get_attr_w_warn_on_none(
+ self.property.mapper, state, dict_, remote_col
+ ),
+ )
+
+ def adapt(col):
+ if self.adapter:
+ return self.adapter(col)
+ else:
+ return col
+
+ if self.property._use_get:
+ return sql.and_(
+ *[
+ sql.or_(
+ adapt(x)
+ != state_bindparam(adapt(x), state, y),
+ adapt(x) == None,
+ )
+ for (x, y) in self.property.local_remote_pairs
+ ]
+ )
+
+ criterion = sql.and_(
+ *[
+ x == y
+ for (x, y) in zip(
+ self.property.mapper.primary_key,
+ self.property.mapper.primary_key_from_instance(other),
+ )
+ ]
+ )
+
+ return ~self._criterion_exists(criterion)
+
+ def __ne__(self, other):
+ """Implement the ``!=`` operator.
+
+ In a many-to-one context, such as::
+
+ MyClass.some_prop != <some object>
+
+ This will typically produce a clause such as::
+
+ mytable.related_id != <some id>
+
+ Where ``<some id>`` is the primary key of the
+ given object.
+
+ The ``!=`` operator provides partial functionality for
+ non-many-to-one comparisons:
+
+ * Comparisons against collections are not supported.
+ Use
+ :meth:`~.RelationshipProperty.Comparator.contains`
+ in conjunction with :func:`_expression.not_`.
+ * When compared to a scalar one-to-many, this will produce a
+ clause that compares the target columns in the parent to
+ the given target.
+ * Compared to a scalar many-to-many, an alias
+ of the association table will be rendered as
+ well, forming a natural join that is part of the
+ main body of the query. This will not work for
+ queries that go beyond simple AND conjunctions of
+ comparisons, such as those which use OR. Use
+ explicit joins, outerjoins, or
+ :meth:`~.RelationshipProperty.Comparator.has` in
+ conjunction with :func:`_expression.not_` for
+ more comprehensive non-many-to-one scalar
+ membership tests.
+ * Comparisons against ``None`` given in a one-to-many
+ or many-to-many context produce an EXISTS clause.
+
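+ E.g., a one-to-many comparison to ``None`` (a sketch; names
+ illustrative)::
+
+     # renders EXISTS (SELECT 1 FROM address WHERE ...)
+     session.query(User).filter(User.addresses != None)
+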
+ """
+ if isinstance(other, (util.NoneType, expression.Null)):
+ if self.property.direction == MANYTOONE:
+ return _orm_annotate(
+ ~self.property._optimized_compare(
+ None, adapt_source=self.adapter
+ )
+ )
+
+ else:
+ return self._criterion_exists()
+ elif self.property.uselist:
+ raise sa_exc.InvalidRequestError(
+ "Can't compare a collection"
+ " to an object or collection; use "
+ "contains() to test for membership."
+ )
+ else:
+ return _orm_annotate(self.__negated_contains_or_equals(other))
+
+ @util.memoized_property
+ def property(self):
+ self.prop.parent._check_configure()
+ return self.prop
+
+ def _with_parent(self, instance, alias_secondary=True, from_entity=None):
+ assert instance is not None
+ adapt_source = None
+ if from_entity is not None:
+ insp = inspect(from_entity)
+ if insp.is_aliased_class:
+ adapt_source = insp._adapter.adapt_clause
+ return self._optimized_compare(
+ instance,
+ value_is_parent=True,
+ adapt_source=adapt_source,
+ alias_secondary=alias_secondary,
+ )
+
+ def _optimized_compare(
+ self,
+ state,
+ value_is_parent=False,
+ adapt_source=None,
+ alias_secondary=True,
+ ):
+ if state is not None:
+ try:
+ state = inspect(state)
+ except sa_exc.NoInspectionAvailable:
+ state = None
+
+ if state is None or not getattr(state, "is_instance", False):
+ raise sa_exc.ArgumentError(
+ "Mapped instance expected for relationship "
+ "comparison to object. Classes, queries and other "
+ "SQL elements are not accepted in this context; for "
+ "comparison with a subquery, "
+ "use %s.has(**criteria)." % self
+ )
+ reverse_direction = not value_is_parent
+
+ if state is None:
+ return self._lazy_none_clause(
+ reverse_direction, adapt_source=adapt_source
+ )
+
+ if not reverse_direction:
+ criterion, bind_to_col = (
+ self._lazy_strategy._lazywhere,
+ self._lazy_strategy._bind_to_col,
+ )
+ else:
+ criterion, bind_to_col = (
+ self._lazy_strategy._rev_lazywhere,
+ self._lazy_strategy._rev_bind_to_col,
+ )
+
+ if reverse_direction:
+ mapper = self.mapper
+ else:
+ mapper = self.parent
+
+ dict_ = attributes.instance_dict(state.obj())
+
+ def visit_bindparam(bindparam):
+ if bindparam._identifying_key in bind_to_col:
+ bindparam.callable = self._get_attr_w_warn_on_none(
+ mapper,
+ state,
+ dict_,
+ bind_to_col[bindparam._identifying_key],
+ )
+
+ if self.secondary is not None and alias_secondary:
+ criterion = ClauseAdapter(
+ self.secondary._anonymous_fromclause()
+ ).traverse(criterion)
+
+ criterion = visitors.cloned_traverse(
+ criterion, {}, {"bindparam": visit_bindparam}
+ )
+
+ if adapt_source:
+ criterion = adapt_source(criterion)
+ return criterion
+
+ def _get_attr_w_warn_on_none(self, mapper, state, dict_, column):
+ """Create the callable that is used in a many-to-one expression.
+
+ E.g.::
+
+ u1 = s.query(User).get(5)
+
+ expr = Address.user == u1
+
+ Above, the SQL should be "address.user_id = 5". The callable
+ returned by this method produces the value "5" based on the identity
+ of ``u1``.
+
+ """
+
+ # in this callable, we're trying to thread the needle through
+ # a wide variety of scenarios, including:
+ #
+ # * the object hasn't been flushed yet and there's no value for
+ # the attribute as of yet
+ #
+ # * the object hasn't been flushed yet but it has a user-defined
+ # value
+ #
+ # * the object has a value but it's expired and not locally present
+ #
+ # * the object has a value but it's expired and not locally present,
+ # and the object is also detached
+ #
+ # * the object hadn't been flushed yet, there was no value, but
+ # later, the object has been expired and detached, and *now*
+ # they're trying to evaluate it
+ #
+ # * the object had a value, but it was changed to a new value, and
+ # then expired
+ #
+ # * the object had a value, but it was changed to a new value, and
+ # then expired, then the object was detached
+ #
+ # * the object has a user-set value, but it's None and we don't do
+ # the comparison correctly for that so warn
+ #
+
+ prop = mapper.get_property_by_column(column)
+
+ # by invoking this method, InstanceState will track the last known
+ # value for this key each time the attribute is to be expired.
+ # this feature was added explicitly for use in this method.
+ state._track_last_known_value(prop.key)
+
+ def _go():
+ last_known = to_return = state._last_known_values[prop.key]
+ existing_is_available = last_known is not attributes.NO_VALUE
+
+ # we support that the value may have changed. so here we
+ # try to get the most recent value including re-fetching.
+ # only if we can't get a value now due to detachment do we return
+ # the last known value
+ current_value = mapper._get_state_attr_by_column(
+ state,
+ dict_,
+ column,
+ passive=attributes.PASSIVE_OFF
+ if state.persistent
+ else attributes.PASSIVE_NO_FETCH ^ attributes.INIT_OK,
+ )
+
+ if current_value is attributes.NEVER_SET:
+ if not existing_is_available:
+ raise sa_exc.InvalidRequestError(
+ "Can't resolve value for column %s on object "
+ "%s; no value has been set for this column"
+ % (column, state_str(state))
+ )
+ elif current_value is attributes.PASSIVE_NO_RESULT:
+ if not existing_is_available:
+ raise sa_exc.InvalidRequestError(
+ "Can't resolve value for column %s on object "
+ "%s; the object is detached and the value was "
+ "expired" % (column, state_str(state))
+ )
+ else:
+ to_return = current_value
+ if to_return is None:
+ util.warn(
+ "Got None for value of column %s; this is unsupported "
+ "for a relationship comparison and will not "
+ "currently produce an IS comparison "
+ "(but may in a future release)" % column
+ )
+ return to_return
+
+ return _go
+
+ def _lazy_none_clause(self, reverse_direction=False, adapt_source=None):
+ if not reverse_direction:
+ criterion, bind_to_col = (
+ self._lazy_strategy._lazywhere,
+ self._lazy_strategy._bind_to_col,
+ )
+ else:
+ criterion, bind_to_col = (
+ self._lazy_strategy._rev_lazywhere,
+ self._lazy_strategy._rev_bind_to_col,
+ )
+
+ criterion = adapt_criterion_to_null(criterion, bind_to_col)
+
+ if adapt_source:
+ criterion = adapt_source(criterion)
+ return criterion
+
+ def __str__(self):
+ return str(self.parent.class_.__name__) + "." + self.key
+
+ def merge(
+ self,
+ session,
+ source_state,
+ source_dict,
+ dest_state,
+ dest_dict,
+ load,
+ _recursive,
+ _resolve_conflict_map,
+ ):
+
+ if load:
+ for r in self._reverse_property:
+ if (source_state, r) in _recursive:
+ return
+
+ if "merge" not in self._cascade:
+ return
+
+ if self.key not in source_dict:
+ return
+
+ if self.uselist:
+ impl = source_state.get_impl(self.key)
+ instances_iterable = impl.get_collection(source_state, source_dict)
+
+ # if this is a CollectionAttributeImpl, then empty should
+ # be False, otherwise "self.key in source_dict" should not be
+ # True
+ assert not instances_iterable.empty if impl.collection else True
+
+ if load:
+ # for a full merge, pre-load the destination collection,
+ # so that individual _merge of each item pulls from identity
+ # map for those already present.
+ # also assumes CollectionAttributeImpl behavior of loading
+ # "old" list in any case
+ dest_state.get_impl(self.key).get(dest_state, dest_dict)
+
+ dest_list = []
+ for current in instances_iterable:
+ current_state = attributes.instance_state(current)
+ current_dict = attributes.instance_dict(current)
+ _recursive[(current_state, self)] = True
+ obj = session._merge(
+ current_state,
+ current_dict,
+ load=load,
+ _recursive=_recursive,
+ _resolve_conflict_map=_resolve_conflict_map,
+ )
+ if obj is not None:
+ dest_list.append(obj)
+
+ if not load:
+ coll = attributes.init_state_collection(
+ dest_state, dest_dict, self.key
+ )
+ for c in dest_list:
+ coll.append_without_event(c)
+ else:
+ dest_state.get_impl(self.key).set(
+ dest_state, dest_dict, dest_list, _adapt=False
+ )
+ else:
+ current = source_dict[self.key]
+ if current is not None:
+ current_state = attributes.instance_state(current)
+ current_dict = attributes.instance_dict(current)
+ _recursive[(current_state, self)] = True
+ obj = session._merge(
+ current_state,
+ current_dict,
+ load=load,
+ _recursive=_recursive,
+ _resolve_conflict_map=_resolve_conflict_map,
+ )
+ else:
+ obj = None
+
+ if not load:
+ dest_dict[self.key] = obj
+ else:
+ dest_state.get_impl(self.key).set(
+ dest_state, dest_dict, obj, None
+ )
+
+ def _value_as_iterable(
+ self, state, dict_, key, passive=attributes.PASSIVE_OFF
+ ):
+ """Return a list of tuples (state, obj) for the given
+ key.
+
+ Returns an empty list if the value is None/empty/PASSIVE_NO_RESULT.
+ """
+
+ impl = state.manager[key].impl
+ x = impl.get(state, dict_, passive=passive)
+ if x is attributes.PASSIVE_NO_RESULT or x is None:
+ return []
+ elif hasattr(impl, "get_collection"):
+ return [
+ (attributes.instance_state(o), o)
+ for o in impl.get_collection(state, dict_, x, passive=passive)
+ ]
+ else:
+ return [(attributes.instance_state(x), x)]
+
+ def cascade_iterator(
+ self, type_, state, dict_, visited_states, halt_on=None
+ ):
+ # assert type_ in self._cascade
+
+ # only actively lazy load on the 'delete' cascade
+ if type_ != "delete" or self.passive_deletes:
+ passive = attributes.PASSIVE_NO_INITIALIZE
+ else:
+ passive = attributes.PASSIVE_OFF
+
+ if type_ == "save-update":
+ tuples = state.manager[self.key].impl.get_all_pending(state, dict_)
+
+ else:
+ tuples = self._value_as_iterable(
+ state, dict_, self.key, passive=passive
+ )
+
+ skip_pending = (
+ type_ == "refresh-expire" and "delete-orphan" not in self._cascade
+ )
+
+ for instance_state, c in tuples:
+ if instance_state in visited_states:
+ continue
+
+ if c is None:
+ # would like to emit a warning here, but
+ # would not be consistent with collection.append(None)
+ # current behavior of silently skipping.
+ # see [ticket:2229]
+ continue
+
+ instance_dict = attributes.instance_dict(c)
+
+ if halt_on and halt_on(instance_state):
+ continue
+
+ if skip_pending and not instance_state.key:
+ continue
+
+ instance_mapper = instance_state.manager.mapper
+
+ if not instance_mapper.isa(self.mapper.class_manager.mapper):
+ raise AssertionError(
+ "Attribute '%s' on class '%s' "
+ "doesn't handle objects "
+ "of type '%s'"
+ % (self.key, self.parent.class_, c.__class__)
+ )
+
+ visited_states.add(instance_state)
+
+ yield c, instance_mapper, instance_state, instance_dict
+
+ @property
+ def _effective_sync_backref(self):
+ if self.viewonly:
+ return False
+ else:
+ return self.sync_backref is not False
+
+ @staticmethod
+ def _check_sync_backref(rel_a, rel_b):
+ if rel_a.viewonly and rel_b.sync_backref:
+ raise sa_exc.InvalidRequestError(
+ "Relationship %s cannot specify sync_backref=True since %s "
+ "includes viewonly=True." % (rel_b, rel_a)
+ )
+ if (
+ rel_a.viewonly
+ and not rel_b.viewonly
+ and rel_b.sync_backref is not False
+ ):
+ rel_b.sync_backref = False
+
+ def _add_reverse_property(self, key):
+ other = self.mapper.get_property(key, _configure_mappers=False)
+ if not isinstance(other, RelationshipProperty):
+ raise sa_exc.InvalidRequestError(
+ "back_populates on relationship '%s' refers to attribute '%s' "
+ "that is not a relationship. The back_populates parameter "
+ "should refer to the name of a relationship on the target "
+ "class." % (self, other)
+ )
+ # viewonly and sync_backref cases
+ # 1. self.viewonly==True and other.sync_backref==True -> error
+ # 2. self.viewonly==True and other.viewonly==False and
+ # other.sync_backref==None -> warn sync_backref=False, set to False
+ self._check_sync_backref(self, other)
+ # 3. other.viewonly==True and self.sync_backref==True -> error
+ # 4. other.viewonly==True and self.viewonly==False and
+ # self.sync_backref==None -> warn sync_backref=False, set to False
+ self._check_sync_backref(other, self)
+
+ self._reverse_property.add(other)
+ other._reverse_property.add(self)
+
+ if not other.mapper.common_parent(self.parent):
+ raise sa_exc.ArgumentError(
+ "reverse_property %r on "
+ "relationship %s references relationship %s, which "
+ "does not reference mapper %s"
+ % (key, self, other, self.parent)
+ )
+
+ if (
+ self.direction in (ONETOMANY, MANYTOONE)
+ and self.direction == other.direction
+ ):
+ raise sa_exc.ArgumentError(
+ "%s and back-reference %s are "
+ "both of the same direction %r. Did you mean to "
+ "set remote_side on the many-to-one side ?"
+ % (other, self, self.direction)
+ )
+
+ @util.memoized_property
+ @util.preload_module("sqlalchemy.orm.mapper")
+ def entity(self):
+ """Return the target mapped entity, which is an inspect() of the
+ class or aliased class that is referred towards.
+
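+ The target may have been configured as a class, a mapper, a
+ string name resolved via the declarative class registry, or a
+ callable returning one of these, e.g. (a sketch; names
+ illustrative)::
+
+     relationship(Address)            # class
+     relationship("Address")          # string, resolved lazily
+     relationship(lambda: Address)    # callable
+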
+ """
+
+ mapperlib = util.preloaded.orm_mapper
+
+ if isinstance(self.argument, util.string_types):
+ argument = self._clsregistry_resolve_name(self.argument)()
+
+ elif callable(self.argument) and not isinstance(
+ self.argument, (type, mapperlib.Mapper)
+ ):
+ argument = self.argument()
+ else:
+ argument = self.argument
+
+ if isinstance(argument, type):
+ return mapperlib.class_mapper(argument, configure=False)
+
+ try:
+ entity = inspect(argument)
+ except sa_exc.NoInspectionAvailable:
+ pass
+ else:
+ if hasattr(entity, "mapper"):
+ return entity
+
+ raise sa_exc.ArgumentError(
+ "relationship '%s' expects "
+ "a class or a mapper argument (received: %s)"
+ % (self.key, type(argument))
+ )
+
+ @util.memoized_property
+ def mapper(self):
+ """Return the targeted :class:`_orm.Mapper` for this
+ :class:`.RelationshipProperty`.
+
+ This is a lazy-initializing static attribute.
+
+ """
+ return self.entity.mapper
+
+ def do_init(self):
+ self._check_conflicts()
+ self._process_dependent_arguments()
+ self._setup_registry_dependencies()
+ self._setup_join_conditions()
+ self._check_cascade_settings(self._cascade)
+ self._post_init()
+ self._generate_backref()
+ self._join_condition._warn_for_conflicting_sync_targets()
+ super(RelationshipProperty, self).do_init()
+ self._lazy_strategy = self._get_strategy((("lazy", "select"),))
+
+ def _setup_registry_dependencies(self):
+ self.parent.mapper.registry._set_depends_on(
+ self.entity.mapper.registry
+ )
+
+ def _process_dependent_arguments(self):
+ """Convert incoming configuration arguments to their
+ proper form.
+
+ Callables are resolved, ORM annotations removed.
+
+ """
+
+ # accept callables for other attributes which may require
+ # deferred initialization. This technique is used
+ # by declarative "string configs" and some recipes.
+ for attr in (
+ "order_by",
+ "primaryjoin",
+ "secondaryjoin",
+ "secondary",
+ "_user_defined_foreign_keys",
+ "remote_side",
+ ):
+ attr_value = getattr(self, attr)
+
+ if isinstance(attr_value, util.string_types):
+ setattr(
+ self,
+ attr,
+ self._clsregistry_resolve_arg(
+ attr_value, favor_tables=attr == "secondary"
+ )(),
+ )
+ elif callable(attr_value) and not _is_mapped_class(attr_value):
+ setattr(self, attr, attr_value())
+
+ # remove "annotations" which are present if mapped class
+ # descriptors are used to create the join expression.
+ for attr in "primaryjoin", "secondaryjoin":
+ val = getattr(self, attr)
+ if val is not None:
+ setattr(
+ self,
+ attr,
+ _orm_deannotate(
+ coercions.expect(
+ roles.ColumnArgumentRole, val, argname=attr
+ )
+ ),
+ )
+
+ if self.secondary is not None and _is_mapped_class(self.secondary):
+ raise sa_exc.ArgumentError(
+ "secondary argument %s passed to to relationship() %s must "
+ "be a Table object or other FROM clause; can't send a mapped "
+ "class directly as rows in 'secondary' are persisted "
+ "independently of a class that is mapped "
+ "to that same table." % (self.secondary, self)
+ )
+
+ # ensure expressions in self.order_by, foreign_keys,
+ # remote_side are all columns, not strings.
+ if self.order_by is not False and self.order_by is not None:
+ self.order_by = tuple(
+ coercions.expect(
+ roles.ColumnArgumentRole, x, argname="order_by"
+ )
+ for x in util.to_list(self.order_by)
+ )
+
+ self._user_defined_foreign_keys = util.column_set(
+ coercions.expect(
+ roles.ColumnArgumentRole, x, argname="foreign_keys"
+ )
+ for x in util.to_column_set(self._user_defined_foreign_keys)
+ )
+
+ self.remote_side = util.column_set(
+ coercions.expect(
+ roles.ColumnArgumentRole, x, argname="remote_side"
+ )
+ for x in util.to_column_set(self.remote_side)
+ )
+
+ self.target = self.entity.persist_selectable
+
+ def _setup_join_conditions(self):
+ self._join_condition = jc = JoinCondition(
+ parent_persist_selectable=self.parent.persist_selectable,
+ child_persist_selectable=self.entity.persist_selectable,
+ parent_local_selectable=self.parent.local_table,
+ child_local_selectable=self.entity.local_table,
+ primaryjoin=self.primaryjoin,
+ secondary=self.secondary,
+ secondaryjoin=self.secondaryjoin,
+ parent_equivalents=self.parent._equivalent_columns,
+ child_equivalents=self.mapper._equivalent_columns,
+ consider_as_foreign_keys=self._user_defined_foreign_keys,
+ local_remote_pairs=self.local_remote_pairs,
+ remote_side=self.remote_side,
+ self_referential=self._is_self_referential,
+ prop=self,
+ support_sync=not self.viewonly,
+ can_be_synced_fn=self._columns_are_mapped,
+ )
+ self.primaryjoin = jc.primaryjoin
+ self.secondaryjoin = jc.secondaryjoin
+ self.direction = jc.direction
+ self.local_remote_pairs = jc.local_remote_pairs
+ self.remote_side = jc.remote_columns
+ self.local_columns = jc.local_columns
+ self.synchronize_pairs = jc.synchronize_pairs
+ self._calculated_foreign_keys = jc.foreign_key_columns
+ self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
+
+ @property
+ def _clsregistry_resolve_arg(self):
+ return self._clsregistry_resolvers[1]
+
+ @property
+ def _clsregistry_resolve_name(self):
+ return self._clsregistry_resolvers[0]
+
+ @util.memoized_property
+ @util.preload_module("sqlalchemy.orm.clsregistry")
+ def _clsregistry_resolvers(self):
+ _resolver = util.preloaded.orm_clsregistry._resolver
+
+ return _resolver(self.parent.class_, self)
+
+ @util.preload_module("sqlalchemy.orm.mapper")
+ def _check_conflicts(self):
+ """Test that this relationship is legal, warn about
+ inheritance conflicts."""
+ mapperlib = util.preloaded.orm_mapper
+ if self.parent.non_primary and not mapperlib.class_mapper(
+ self.parent.class_, configure=False
+ ).has_property(self.key):
+ raise sa_exc.ArgumentError(
+ "Attempting to assign a new "
+ "relationship '%s' to a non-primary mapper on "
+ "class '%s'. New relationships can only be added "
+ "to the primary mapper, i.e. the very first mapper "
+ "created for class '%s' "
+ % (
+ self.key,
+ self.parent.class_.__name__,
+ self.parent.class_.__name__,
+ )
+ )
+
+ @property
+ def cascade(self):
+ """Return the current cascade setting for this
+ :class:`.RelationshipProperty`.
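+
+ E.g. (a sketch; ``Child`` is illustrative)::
+
+     rel = relationship("Child", cascade="save-update, merge")
+     rel.cascade  # -> CascadeOptions, a frozenset subclass
+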
+ """
+ return self._cascade
+
+ @cascade.setter
+ def cascade(self, cascade):
+ self._set_cascade(cascade)
+
+ def _set_cascade(self, cascade):
+ cascade = CascadeOptions(cascade)
+
+ if self.viewonly:
+ non_viewonly = set(cascade).difference(
+ CascadeOptions._viewonly_cascades
+ )
+ if non_viewonly:
+ raise sa_exc.ArgumentError(
+ 'Cascade settings "%s" apply to persistence operations '
+ "and should not be combined with a viewonly=True "
+ "relationship." % (", ".join(sorted(non_viewonly)))
+ )
+
+ if "mapper" in self.__dict__:
+ self._check_cascade_settings(cascade)
+ self._cascade = cascade
+
+ if self._dependency_processor:
+ self._dependency_processor.cascade = cascade
+
+ def _check_cascade_settings(self, cascade):
+ if (
+ cascade.delete_orphan
+ and not self.single_parent
+ and (self.direction is MANYTOMANY or self.direction is MANYTOONE)
+ ):
+ raise sa_exc.ArgumentError(
+ "For %(direction)s relationship %(rel)s, delete-orphan "
+ "cascade is normally "
+ 'configured only on the "one" side of a one-to-many '
+ "relationship, "
+ 'and not on the "many" side of a many-to-one or many-to-many '
+ "relationship. "
+ "To force this relationship to allow a particular "
+ '"%(relatedcls)s" object to be referred towards by only '
+ 'a single "%(clsname)s" object at a time via the '
+ "%(rel)s relationship, which "
+ "would allow "
+ "delete-orphan cascade to take place in this direction, set "
+ "the single_parent=True flag."
+ % {
+ "rel": self,
+ "direction": "many-to-one"
+ if self.direction is MANYTOONE
+ else "many-to-many",
+ "clsname": self.parent.class_.__name__,
+ "relatedcls": self.mapper.class_.__name__,
+ },
+ code="bbf0",
+ )
+
+ if self.passive_deletes == "all" and (
+ "delete" in cascade or "delete-orphan" in cascade
+ ):
+ raise sa_exc.ArgumentError(
+ "On %s, can't set passive_deletes='all' in conjunction "
+ "with 'delete' or 'delete-orphan' cascade" % self
+ )
+
+ if cascade.delete_orphan:
+ self.mapper.primary_mapper()._delete_orphans.append(
+ (self.key, self.parent.class_)
+ )
+
+ def _persists_for(self, mapper):
+ """Return True if this property will persist values on behalf
+ of the given mapper.
+
+ """
+
+ return (
+ self.key in mapper.relationships
+ and mapper.relationships[self.key] is self
+ )
+
+ def _columns_are_mapped(self, *cols):
+ """Return True if all columns in the given collection are
+ mapped by the tables referenced by this :class:`.RelationshipProperty`.
+
+ """
+ for c in cols:
+ if (
+ self.secondary is not None
+ and self.secondary.c.contains_column(c)
+ ):
+ continue
+ if not self.parent.persist_selectable.c.contains_column(
+ c
+ ) and not self.target.c.contains_column(c):
+ return False
+ return True
+
+ def _generate_backref(self):
+ """Interpret the 'backref' instruction to create a
+ :func:`_orm.relationship` complementary to this one."""
+
+ if self.parent.non_primary:
+ return
+ if self.backref is not None and not self.back_populates:
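+ # self.backref may be a plain string name, or the
+ # (key, dict-of-kwargs) tuple as produced by the backref()
+ # function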
+ if isinstance(self.backref, util.string_types):
+ backref_key, kwargs = self.backref, {}
+ else:
+ backref_key, kwargs = self.backref
+ mapper = self.mapper.primary_mapper()
+
+ if not mapper.concrete:
+ check = set(mapper.iterate_to_root()).union(
+ mapper.self_and_descendants
+ )
+ for m in check:
+ if m.has_property(backref_key) and not m.concrete:
+ raise sa_exc.ArgumentError(
+ "Error creating backref "
+ "'%s' on relationship '%s': property of that "
+ "name exists on mapper '%s'"
+ % (backref_key, self, m)
+ )
+
+ # determine primaryjoin/secondaryjoin for the
+ # backref. Use the one we had, so that
+ # a custom join doesn't have to be specified in
+ # both directions.
+ if self.secondary is not None:
+ # for many to many, just switch primaryjoin/
+ # secondaryjoin. use the annotated
+ # pj/sj on the _join_condition.
+ pj = kwargs.pop(
+ "primaryjoin",
+ self._join_condition.secondaryjoin_minus_local,
+ )
+ sj = kwargs.pop(
+ "secondaryjoin",
+ self._join_condition.primaryjoin_minus_local,
+ )
+ else:
+ pj = kwargs.pop(
+ "primaryjoin",
+ self._join_condition.primaryjoin_reverse_remote,
+ )
+ sj = kwargs.pop("secondaryjoin", None)
+ if sj:
+ raise sa_exc.InvalidRequestError(
+ "Can't assign 'secondaryjoin' on a backref "
+ "against a non-secondary relationship."
+ )
+
+ foreign_keys = kwargs.pop(
+ "foreign_keys", self._user_defined_foreign_keys
+ )
+ parent = self.parent.primary_mapper()
+ kwargs.setdefault("viewonly", self.viewonly)
+ kwargs.setdefault("post_update", self.post_update)
+ kwargs.setdefault("passive_updates", self.passive_updates)
+ kwargs.setdefault("sync_backref", self.sync_backref)
+ self.back_populates = backref_key
+ relationship = RelationshipProperty(
+ parent,
+ self.secondary,
+ pj,
+ sj,
+ foreign_keys=foreign_keys,
+ back_populates=self.key,
+ **kwargs
+ )
+ mapper._configure_property(backref_key, relationship)
+
+ if self.back_populates:
+ self._add_reverse_property(self.back_populates)
+
+ @util.preload_module("sqlalchemy.orm.dependency")
+ def _post_init(self):
+ dependency = util.preloaded.orm_dependency
+
+ if self.uselist is None:
+ self.uselist = self.direction is not MANYTOONE
+ if not self.viewonly:
+ self._dependency_processor = (
+ dependency.DependencyProcessor.from_relationship
+ )(self)
+
+ @util.memoized_property
+ def _use_get(self):
+ """memoize the 'use_get' attribute of this RelationshipLoader's
+ lazyloader."""
+
+ strategy = self._lazy_strategy
+ return strategy.use_get
+
+ @util.memoized_property
+ def _is_self_referential(self):
+ return self.mapper.common_parent(self.parent)
+
+ def _create_joins(
+ self,
+ source_polymorphic=False,
+ source_selectable=None,
+ dest_selectable=None,
+ of_type_entity=None,
+ alias_secondary=False,
+ extra_criteria=(),
+ ):
+
+ aliased = False
+
+ if alias_secondary and self.secondary is not None:
+ aliased = True
+
+ if source_selectable is None:
+ if source_polymorphic and self.parent.with_polymorphic:
+ source_selectable = self.parent._with_polymorphic_selectable
+
+ if of_type_entity:
+ dest_mapper = of_type_entity.mapper
+ if dest_selectable is None:
+ dest_selectable = of_type_entity.selectable
+ aliased = True
+ else:
+ dest_mapper = self.mapper
+
+ if dest_selectable is None:
+ dest_selectable = self.entity.selectable
+ if self.mapper.with_polymorphic:
+ aliased = True
+
+ if self._is_self_referential and source_selectable is None:
+ dest_selectable = dest_selectable._anonymous_fromclause()
+ aliased = True
+ elif (
+ dest_selectable is not self.mapper._with_polymorphic_selectable
+ or self.mapper.with_polymorphic
+ ):
+ aliased = True
+
+ single_crit = dest_mapper._single_table_criterion
+ aliased = aliased or (
+ source_selectable is not None
+ and (
+ source_selectable
+ is not self.parent._with_polymorphic_selectable
+ or source_selectable._is_subquery
+ )
+ )
+
+ (
+ primaryjoin,
+ secondaryjoin,
+ secondary,
+ target_adapter,
+ dest_selectable,
+ ) = self._join_condition.join_targets(
+ source_selectable,
+ dest_selectable,
+ aliased,
+ single_crit,
+ extra_criteria,
+ )
+ if source_selectable is None:
+ source_selectable = self.parent.local_table
+ if dest_selectable is None:
+ dest_selectable = self.entity.local_table
+ return (
+ primaryjoin,
+ secondaryjoin,
+ source_selectable,
+ dest_selectable,
+ secondary,
+ target_adapter,
+ )
+
+
+def _annotate_columns(element, annotations):
+ def clone(elem):
+ if isinstance(elem, expression.ColumnClause):
+ elem = elem._annotate(annotations.copy())
+ elem._copy_internals(clone=clone)
+ return elem
+
+ if element is not None:
+ element = clone(element)
+ clone = None # remove gc cycles
+ return element
+
+
+class JoinCondition(object):
+ def __init__(
+ self,
+ parent_persist_selectable,
+ child_persist_selectable,
+ parent_local_selectable,
+ child_local_selectable,
+ primaryjoin=None,
+ secondary=None,
+ secondaryjoin=None,
+ parent_equivalents=None,
+ child_equivalents=None,
+ consider_as_foreign_keys=None,
+ local_remote_pairs=None,
+ remote_side=None,
+ self_referential=False,
+ prop=None,
+ support_sync=True,
+ can_be_synced_fn=lambda *c: True,
+ ):
+ self.parent_persist_selectable = parent_persist_selectable
+ self.parent_local_selectable = parent_local_selectable
+ self.child_persist_selectable = child_persist_selectable
+ self.child_local_selectable = child_local_selectable
+ self.parent_equivalents = parent_equivalents
+ self.child_equivalents = child_equivalents
+ self.primaryjoin = primaryjoin
+ self.secondaryjoin = secondaryjoin
+ self.secondary = secondary
+ self.consider_as_foreign_keys = consider_as_foreign_keys
+ self._local_remote_pairs = local_remote_pairs
+ self._remote_side = remote_side
+ self.prop = prop
+ self.self_referential = self_referential
+ self.support_sync = support_sync
+ self.can_be_synced_fn = can_be_synced_fn
+ self._determine_joins()
+ self._sanitize_joins()
+ self._annotate_fks()
+ self._annotate_remote()
+ self._annotate_local()
+ self._annotate_parentmapper()
+ self._setup_pairs()
+ self._check_foreign_cols(self.primaryjoin, True)
+ if self.secondaryjoin is not None:
+ self._check_foreign_cols(self.secondaryjoin, False)
+ self._determine_direction()
+ self._check_remote_side()
+ self._log_joins()
+
+ def _log_joins(self):
+ if self.prop is None:
+ return
+ log = self.prop.logger
+ log.info("%s setup primary join %s", self.prop, self.primaryjoin)
+ log.info("%s setup secondary join %s", self.prop, self.secondaryjoin)
+ log.info(
+ "%s synchronize pairs [%s]",
+ self.prop,
+ ",".join(
+ "(%s => %s)" % (l, r) for (l, r) in self.synchronize_pairs
+ ),
+ )
+ log.info(
+ "%s secondary synchronize pairs [%s]",
+ self.prop,
+ ",".join(
+ "(%s => %s)" % (l, r)
+ for (l, r) in self.secondary_synchronize_pairs or []
+ ),
+ )
+ log.info(
+ "%s local/remote pairs [%s]",
+ self.prop,
+ ",".join(
+ "(%s / %s)" % (l, r) for (l, r) in self.local_remote_pairs
+ ),
+ )
+ log.info(
+ "%s remote columns [%s]",
+ self.prop,
+ ",".join("%s" % col for col in self.remote_columns),
+ )
+ log.info(
+ "%s local columns [%s]",
+ self.prop,
+ ",".join("%s" % col for col in self.local_columns),
+ )
+ log.info("%s relationship direction %s", self.prop, self.direction)
+
+ def _sanitize_joins(self):
+ """remove the parententity annotation from our join conditions which
+ can leak in here based on some declarative patterns and maybe others.
+
+ We'd want to remove "parentmapper" also, but apparently there's
+ an exotic use case in _join_fixture_inh_selfref_w_entity
+ that relies upon it being present, see :ticket:`3364`.
+
+ """
+
+ self.primaryjoin = _deep_deannotate(
+ self.primaryjoin, values=("parententity", "proxy_key")
+ )
+ if self.secondaryjoin is not None:
+ self.secondaryjoin = _deep_deannotate(
+ self.secondaryjoin, values=("parententity", "proxy_key")
+ )
+
+ def _determine_joins(self):
+ """Determine the 'primaryjoin' and 'secondaryjoin' attributes,
+ if not passed to the constructor already.
+
+ This is based on analysis of the foreign key relationships
+ between the parent and target mapped selectables.
+
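+ When foreign keys are absent or ambiguous, these conditions
+ may instead be supplied explicitly, e.g. (a sketch; names
+ illustrative)::
+
+     relationship(
+         "Address",
+         primaryjoin="User.id == Address.user_id",
+         foreign_keys="[Address.user_id]",
+     )
+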
+ """
+ if self.secondaryjoin is not None and self.secondary is None:
+ raise sa_exc.ArgumentError(
+ "Property %s specified with secondary "
+ "join condition but "
+ "no secondary argument" % self.prop
+ )
+
+ # find a join between the given mapper's mapped table and
+ # the given table. will try the mapper's local table first
+ # for more specificity, then if not found will try the more
+ # general mapped table, which in the case of inheritance is
+ # a join.
+ try:
+ consider_as_foreign_keys = self.consider_as_foreign_keys or None
+ if self.secondary is not None:
+ if self.secondaryjoin is None:
+ self.secondaryjoin = join_condition(
+ self.child_persist_selectable,
+ self.secondary,
+ a_subset=self.child_local_selectable,
+ consider_as_foreign_keys=consider_as_foreign_keys,
+ )
+ if self.primaryjoin is None:
+ self.primaryjoin = join_condition(
+ self.parent_persist_selectable,
+ self.secondary,
+ a_subset=self.parent_local_selectable,
+ consider_as_foreign_keys=consider_as_foreign_keys,
+ )
+ else:
+ if self.primaryjoin is None:
+ self.primaryjoin = join_condition(
+ self.parent_persist_selectable,
+ self.child_persist_selectable,
+ a_subset=self.parent_local_selectable,
+ consider_as_foreign_keys=consider_as_foreign_keys,
+ )
+ except sa_exc.NoForeignKeysError as nfe:
+ if self.secondary is not None:
+ util.raise_(
+ sa_exc.NoForeignKeysError(
+ "Could not determine join "
+ "condition between parent/child tables on "
+ "relationship %s - there are no foreign keys "
+ "linking these tables via secondary table '%s'. "
+ "Ensure that referencing columns are associated "
+ "with a ForeignKey or ForeignKeyConstraint, or "
+ "specify 'primaryjoin' and 'secondaryjoin' "
+ "expressions." % (self.prop, self.secondary)
+ ),
+ from_=nfe,
+ )
+ else:
+ util.raise_(
+ sa_exc.NoForeignKeysError(
+ "Could not determine join "
+ "condition between parent/child tables on "
+ "relationship %s - there are no foreign keys "
+ "linking these tables. "
+ "Ensure that referencing columns are associated "
+ "with a ForeignKey or ForeignKeyConstraint, or "
+ "specify a 'primaryjoin' expression." % self.prop
+ ),
+ from_=nfe,
+ )
+ except sa_exc.AmbiguousForeignKeysError as afe:
+ if self.secondary is not None:
+ util.raise_(
+ sa_exc.AmbiguousForeignKeysError(
+ "Could not determine join "
+ "condition between parent/child tables on "
+ "relationship %s - there are multiple foreign key "
+ "paths linking the tables via secondary table '%s'. "
+ "Specify the 'foreign_keys' "
+ "argument, providing a list of those columns which "
+ "should be counted as containing a foreign key "
+ "reference from the secondary table to each of the "
+ "parent and child tables."
+ % (self.prop, self.secondary)
+ ),
+ from_=afe,
+ )
+ else:
+ util.raise_(
+ sa_exc.AmbiguousForeignKeysError(
+ "Could not determine join "
+ "condition between parent/child tables on "
+ "relationship %s - there are multiple foreign key "
+ "paths linking the tables. Specify the "
+ "'foreign_keys' argument, providing a list of those "
+ "columns which should be counted as containing a "
+ "foreign key reference to the parent table."
+ % self.prop
+ ),
+ from_=afe,
+ )
+
+ @property
+ def primaryjoin_minus_local(self):
+ return _deep_deannotate(self.primaryjoin, values=("local", "remote"))
+
+ @property
+ def secondaryjoin_minus_local(self):
+ return _deep_deannotate(self.secondaryjoin, values=("local", "remote"))
+
+ @util.memoized_property
+ def primaryjoin_reverse_remote(self):
+ """Return the primaryjoin condition suitable for the
+ "reverse" direction.
+
+ If the primaryjoin was delivered here with pre-existing
+ "remote" annotations, the local/remote annotations
+ are reversed. Otherwise, the local/remote annotations
+ are removed.
+
+ """
+ if self._has_remote_annotations:
+
+ def replace(element):
+ if "remote" in element._annotations:
+ v = dict(element._annotations)
+ del v["remote"]
+ v["local"] = True
+ return element._with_annotations(v)
+ elif "local" in element._annotations:
+ v = dict(element._annotations)
+ del v["local"]
+ v["remote"] = True
+ return element._with_annotations(v)
+
+ return visitors.replacement_traverse(self.primaryjoin, {}, replace)
+ else:
+ if self._has_foreign_annotations:
+ # TODO: coverage
+ return _deep_deannotate(
+ self.primaryjoin, values=("local", "remote")
+ )
+ else:
+ return _deep_deannotate(self.primaryjoin)
+
+ def _has_annotation(self, clause, annotation):
+ for col in visitors.iterate(clause, {}):
+ if annotation in col._annotations:
+ return True
+ else:
+ return False
+
+ @util.memoized_property
+ def _has_foreign_annotations(self):
+ return self._has_annotation(self.primaryjoin, "foreign")
+
+ @util.memoized_property
+ def _has_remote_annotations(self):
+ return self._has_annotation(self.primaryjoin, "remote")
+
+ def _annotate_fks(self):
+ """Annotate the primaryjoin and secondaryjoin
+ structures with 'foreign' annotations marking columns
+ considered as foreign.
+
+ """
+ if self._has_foreign_annotations:
+ return
+
+ if self.consider_as_foreign_keys:
+ self._annotate_from_fk_list()
+ else:
+ self._annotate_present_fks()
+
+ def _annotate_from_fk_list(self):
+ def check_fk(col):
+ if col in self.consider_as_foreign_keys:
+ return col._annotate({"foreign": True})
+
+ self.primaryjoin = visitors.replacement_traverse(
+ self.primaryjoin, {}, check_fk
+ )
+ if self.secondaryjoin is not None:
+ self.secondaryjoin = visitors.replacement_traverse(
+ self.secondaryjoin, {}, check_fk
+ )
+
+ def _annotate_present_fks(self):
+ if self.secondary is not None:
+ secondarycols = util.column_set(self.secondary.c)
+ else:
+ secondarycols = set()
+
+ def is_foreign(a, b):
+ if isinstance(a, schema.Column) and isinstance(b, schema.Column):
+ if a.references(b):
+ return a
+ elif b.references(a):
+ return b
+
+ if secondarycols:
+ if a in secondarycols and b not in secondarycols:
+ return a
+ elif b in secondarycols and a not in secondarycols:
+ return b
+
+ def visit_binary(binary):
+ if not isinstance(
+ binary.left, sql.ColumnElement
+ ) or not isinstance(binary.right, sql.ColumnElement):
+ return
+
+ if (
+ "foreign" not in binary.left._annotations
+ and "foreign" not in binary.right._annotations
+ ):
+ col = is_foreign(binary.left, binary.right)
+ if col is not None:
+ if col.compare(binary.left):
+ binary.left = binary.left._annotate({"foreign": True})
+ elif col.compare(binary.right):
+ binary.right = binary.right._annotate(
+ {"foreign": True}
+ )
+
+ self.primaryjoin = visitors.cloned_traverse(
+ self.primaryjoin, {}, {"binary": visit_binary}
+ )
+ if self.secondaryjoin is not None:
+ self.secondaryjoin = visitors.cloned_traverse(
+ self.secondaryjoin, {}, {"binary": visit_binary}
+ )
+
+ def _refers_to_parent_table(self):
+ """Return True if the join condition contains column
+ comparisons where both columns are in both tables.
+
+ """
+ pt = self.parent_persist_selectable
+ mt = self.child_persist_selectable
+ result = [False]
+
+ def visit_binary(binary):
+ c, f = binary.left, binary.right
+ if (
+ isinstance(c, expression.ColumnClause)
+ and isinstance(f, expression.ColumnClause)
+ and pt.is_derived_from(c.table)
+ and pt.is_derived_from(f.table)
+ and mt.is_derived_from(c.table)
+ and mt.is_derived_from(f.table)
+ ):
+ result[0] = True
+
+ visitors.traverse(self.primaryjoin, {}, {"binary": visit_binary})
+ return result[0]
+
+ def _tables_overlap(self):
+ """Return True if parent/child tables have some overlap."""
+
+ return selectables_overlap(
+ self.parent_persist_selectable, self.child_persist_selectable
+ )
+
+ def _annotate_remote(self):
+ """Annotate the primaryjoin and secondaryjoin
+ structures with 'remote' annotations marking columns
+ considered as part of the 'remote' side.
+
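+ End-user code may mark the remote side explicitly using the
+ :func:`.remote` annotation function, e.g. for a
+ self-referential case (a sketch; names illustrative)::
+
+     parent = relationship(
+         "Node",
+         primaryjoin="remote(Node.id) == foreign(Node.parent_id)",
+     )
+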
+ """
+ if self._has_remote_annotations:
+ return
+
+ if self.secondary is not None:
+ self._annotate_remote_secondary()
+ elif self._local_remote_pairs or self._remote_side:
+ self._annotate_remote_from_args()
+ elif self._refers_to_parent_table():
+ self._annotate_selfref(
+ lambda col: "foreign" in col._annotations, False
+ )
+ elif self._tables_overlap():
+ self._annotate_remote_with_overlap()
+ else:
+ self._annotate_remote_distinct_selectables()
+
+ def _annotate_remote_secondary(self):
+ """annotate 'remote' in primaryjoin, secondaryjoin
+ when 'secondary' is present.
+
+ """
+
+ def repl(element):
+ if self.secondary.c.contains_column(element):
+ return element._annotate({"remote": True})
+
+ self.primaryjoin = visitors.replacement_traverse(
+ self.primaryjoin, {}, repl
+ )
+ self.secondaryjoin = visitors.replacement_traverse(
+ self.secondaryjoin, {}, repl
+ )
+
+ def _annotate_selfref(self, fn, remote_side_given):
+ """annotate 'remote' in primaryjoin, secondaryjoin
+ when the relationship is detected as self-referential.
+
+ """
+
+ def visit_binary(binary):
+ equated = binary.left.compare(binary.right)
+ if isinstance(binary.left, expression.ColumnClause) and isinstance(
+ binary.right, expression.ColumnClause
+ ):
+ # assume one to many - FKs are "remote"
+ if fn(binary.left):
+ binary.left = binary.left._annotate({"remote": True})
+ if fn(binary.right) and not equated:
+ binary.right = binary.right._annotate({"remote": True})
+ elif not remote_side_given:
+ self._warn_non_column_elements()
+
+ self.primaryjoin = visitors.cloned_traverse(
+ self.primaryjoin, {}, {"binary": visit_binary}
+ )
+
+ def _annotate_remote_from_args(self):
+ """annotate 'remote' in primaryjoin, secondaryjoin
+ when the 'remote_side' or '_local_remote_pairs'
+ arguments are used.
+
+ """
+ if self._local_remote_pairs:
+ if self._remote_side:
+ raise sa_exc.ArgumentError(
+ "remote_side argument is redundant "
+ "against more detailed _local_remote_side "
+ "argument."
+ )
+
+ remote_side = [r for (l, r) in self._local_remote_pairs]
+ else:
+ remote_side = self._remote_side
+
+ if self._refers_to_parent_table():
+ self._annotate_selfref(lambda col: col in remote_side, True)
+ else:
+
+ def repl(element):
+ # use set() to avoid generating ``__eq__()`` expressions
+ # against each element
+ if element in set(remote_side):
+ return element._annotate({"remote": True})
+
+ self.primaryjoin = visitors.replacement_traverse(
+ self.primaryjoin, {}, repl
+ )
+
+ def _annotate_remote_with_overlap(self):
+ """annotate 'remote' in primaryjoin, secondaryjoin
+ when the parent/child tables have some set of
+ tables in common, though is not a fully self-referential
+ relationship.
+
+ """
+
+ def visit_binary(binary):
+ binary.left, binary.right = proc_left_right(
+ binary.left, binary.right
+ )
+ binary.right, binary.left = proc_left_right(
+ binary.right, binary.left
+ )
+
+ check_entities = (
+ self.prop is not None and self.prop.mapper is not self.prop.parent
+ )
+
+ def proc_left_right(left, right):
+ if isinstance(left, expression.ColumnClause) and isinstance(
+ right, expression.ColumnClause
+ ):
+ if self.child_persist_selectable.c.contains_column(
+ right
+ ) and self.parent_persist_selectable.c.contains_column(left):
+ right = right._annotate({"remote": True})
+ elif (
+ check_entities
+ and right._annotations.get("parentmapper") is self.prop.mapper
+ ):
+ right = right._annotate({"remote": True})
+ elif (
+ check_entities
+ and left._annotations.get("parentmapper") is self.prop.mapper
+ ):
+ left = left._annotate({"remote": True})
+ else:
+ self._warn_non_column_elements()
+
+ return left, right
+
+ self.primaryjoin = visitors.cloned_traverse(
+ self.primaryjoin, {}, {"binary": visit_binary}
+ )
+
+ def _annotate_remote_distinct_selectables(self):
+ """annotate 'remote' in primaryjoin, secondaryjoin
+ when the parent/child tables are entirely
+ separate.
+
+ """
+
+ def repl(element):
+ if self.child_persist_selectable.c.contains_column(element) and (
+ not self.parent_local_selectable.c.contains_column(element)
+ or self.child_local_selectable.c.contains_column(element)
+ ):
+ return element._annotate({"remote": True})
+
+ self.primaryjoin = visitors.replacement_traverse(
+ self.primaryjoin, {}, repl
+ )
+
+ def _warn_non_column_elements(self):
+ util.warn(
+ "Non-simple column elements in primary "
+ "join condition for property %s - consider using "
+ "remote() annotations to mark the remote side." % self.prop
+ )
+
+ def _annotate_local(self):
+ """Annotate the primaryjoin and secondaryjoin
+ structures with 'local' annotations.
+
+        This annotates all column elements found
+        simultaneously in the parent table
+        and the join condition that don't already have a
+        'remote' annotation, whether set up by
+        _annotate_remote() or by the user.
+
+ """
+ if self._has_annotation(self.primaryjoin, "local"):
+ return
+
+ if self._local_remote_pairs:
+ local_side = util.column_set(
+ [l for (l, r) in self._local_remote_pairs]
+ )
+ else:
+ local_side = util.column_set(self.parent_persist_selectable.c)
+
+ def locals_(elem):
+ if "remote" not in elem._annotations and elem in local_side:
+ return elem._annotate({"local": True})
+
+ self.primaryjoin = visitors.replacement_traverse(
+ self.primaryjoin, {}, locals_
+ )
+
+ def _annotate_parentmapper(self):
+ if self.prop is None:
+ return
+
+ def parentmappers_(elem):
+ if "remote" in elem._annotations:
+ return elem._annotate({"parentmapper": self.prop.mapper})
+ elif "local" in elem._annotations:
+ return elem._annotate({"parentmapper": self.prop.parent})
+
+ self.primaryjoin = visitors.replacement_traverse(
+ self.primaryjoin, {}, parentmappers_
+ )
+
+ def _check_remote_side(self):
+ if not self.local_remote_pairs:
+ raise sa_exc.ArgumentError(
+ "Relationship %s could "
+ "not determine any unambiguous local/remote column "
+ "pairs based on join condition and remote_side "
+ "arguments. "
+ "Consider using the remote() annotation to "
+ "accurately mark those elements of the join "
+ "condition that are on the remote side of "
+ "the relationship." % (self.prop,)
+ )
+
+ def _check_foreign_cols(self, join_condition, primary):
+ """Check the foreign key columns collected and emit error
+ messages."""
+
+ can_sync = False
+
+ foreign_cols = self._gather_columns_with_annotation(
+ join_condition, "foreign"
+ )
+
+ has_foreign = bool(foreign_cols)
+
+ if primary:
+ can_sync = bool(self.synchronize_pairs)
+ else:
+ can_sync = bool(self.secondary_synchronize_pairs)
+
+ if (
+ self.support_sync
+ and can_sync
+ or (not self.support_sync and has_foreign)
+ ):
+ return
+
+ # from here below is just determining the best error message
+ # to report. Check for a join condition using any operator
+ # (not just ==), perhaps they need to turn on "viewonly=True".
+ if self.support_sync and has_foreign and not can_sync:
+ err = (
+ "Could not locate any simple equality expressions "
+ "involving locally mapped foreign key columns for "
+ "%s join condition "
+ "'%s' on relationship %s."
+ % (
+ primary and "primary" or "secondary",
+ join_condition,
+ self.prop,
+ )
+ )
+ err += (
+ " Ensure that referencing columns are associated "
+ "with a ForeignKey or ForeignKeyConstraint, or are "
+ "annotated in the join condition with the foreign() "
+ "annotation. To allow comparison operators other than "
+ "'==', the relationship can be marked as viewonly=True."
+ )
+
+ raise sa_exc.ArgumentError(err)
+ else:
+ err = (
+ "Could not locate any relevant foreign key columns "
+ "for %s join condition '%s' on relationship %s."
+ % (
+ primary and "primary" or "secondary",
+ join_condition,
+ self.prop,
+ )
+ )
+ err += (
+ " Ensure that referencing columns are associated "
+ "with a ForeignKey or ForeignKeyConstraint, or are "
+ "annotated in the join condition with the foreign() "
+ "annotation."
+ )
+ raise sa_exc.ArgumentError(err)
+
+ def _determine_direction(self):
+ """Determine if this relationship is one to many, many to one,
+ many to many.
+
+ """
+ if self.secondaryjoin is not None:
+ self.direction = MANYTOMANY
+ else:
+ parentcols = util.column_set(self.parent_persist_selectable.c)
+ targetcols = util.column_set(self.child_persist_selectable.c)
+
+ # fk collection which suggests ONETOMANY.
+ onetomany_fk = targetcols.intersection(self.foreign_key_columns)
+
+ # fk collection which suggests MANYTOONE.
+
+ manytoone_fk = parentcols.intersection(self.foreign_key_columns)
+
+ if onetomany_fk and manytoone_fk:
+ # fks on both sides. test for overlap of local/remote
+ # with foreign key.
+ # we will gather columns directly from their annotations
+ # without deannotating, so that we can distinguish on a column
+ # that refers to itself.
+
+ # 1. columns that are both remote and FK suggest
+ # onetomany.
+ onetomany_local = self._gather_columns_with_annotation(
+ self.primaryjoin, "remote", "foreign"
+ )
+
+ # 2. columns that are FK but are not remote (e.g. local)
+ # suggest manytoone.
+ manytoone_local = set(
+ [
+ c
+ for c in self._gather_columns_with_annotation(
+ self.primaryjoin, "foreign"
+ )
+ if "remote" not in c._annotations
+ ]
+ )
+
+ # 3. if both collections are present, remove columns that
+ # refer to themselves. This is for the case of
+ # and_(Me.id == Me.remote_id, Me.version == Me.version)
+ if onetomany_local and manytoone_local:
+ self_equated = self.remote_columns.intersection(
+ self.local_columns
+ )
+ onetomany_local = onetomany_local.difference(self_equated)
+ manytoone_local = manytoone_local.difference(self_equated)
+
+ # at this point, if only one or the other collection is
+ # present, we know the direction, otherwise it's still
+ # ambiguous.
+
+ if onetomany_local and not manytoone_local:
+ self.direction = ONETOMANY
+ elif manytoone_local and not onetomany_local:
+ self.direction = MANYTOONE
+ else:
+ raise sa_exc.ArgumentError(
+ "Can't determine relationship"
+ " direction for relationship '%s' - foreign "
+ "key columns within the join condition are present "
+ "in both the parent and the child's mapped tables. "
+ "Ensure that only those columns referring "
+ "to a parent column are marked as foreign, "
+ "either via the foreign() annotation or "
+ "via the foreign_keys argument." % self.prop
+ )
+ elif onetomany_fk:
+ self.direction = ONETOMANY
+ elif manytoone_fk:
+ self.direction = MANYTOONE
+ else:
+ raise sa_exc.ArgumentError(
+ "Can't determine relationship "
+ "direction for relationship '%s' - foreign "
+ "key columns are present in neither the parent "
+ "nor the child's mapped tables" % self.prop
+ )
+
+ def _deannotate_pairs(self, collection):
+ """provide deannotation for the various lists of
+ pairs, so that using them in hashes doesn't incur
+ high-overhead __eq__() comparisons against
+ original columns mapped.
+
+ """
+ return [(x._deannotate(), y._deannotate()) for x, y in collection]
+
+ def _setup_pairs(self):
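+        # derive the local/remote and synchronize pairs from the
+        # annotated join conditions.  e.g. (hypothetical) for
+        #     users.c.id == addresses.c.user_id
+        # with addresses.c.user_id annotated "remote" + "foreign",
+        # local_remote_pairs and synchronize_pairs both become
+        # [(users.c.id, addresses.c.user_id)].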
+ sync_pairs = []
+ lrp = util.OrderedSet([])
+ secondary_sync_pairs = []
+
+ def go(joincond, collection):
+ def visit_binary(binary, left, right):
+ if (
+ "remote" in right._annotations
+ and "remote" not in left._annotations
+ and self.can_be_synced_fn(left)
+ ):
+ lrp.add((left, right))
+ elif (
+ "remote" in left._annotations
+ and "remote" not in right._annotations
+ and self.can_be_synced_fn(right)
+ ):
+ lrp.add((right, left))
+ if binary.operator is operators.eq and self.can_be_synced_fn(
+ left, right
+ ):
+ if "foreign" in right._annotations:
+ collection.append((left, right))
+ elif "foreign" in left._annotations:
+ collection.append((right, left))
+
+ visit_binary_product(visit_binary, joincond)
+
+ for joincond, collection in [
+ (self.primaryjoin, sync_pairs),
+ (self.secondaryjoin, secondary_sync_pairs),
+ ]:
+ if joincond is None:
+ continue
+ go(joincond, collection)
+
+ self.local_remote_pairs = self._deannotate_pairs(lrp)
+ self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
+ self.secondary_synchronize_pairs = self._deannotate_pairs(
+ secondary_sync_pairs
+ )
+
+ _track_overlapping_sync_targets = weakref.WeakKeyDictionary()
+
+ def _warn_for_conflicting_sync_targets(self):
+ if not self.support_sync:
+ return
+
+ # we would like to detect if we are synchronizing any column
+ # pairs in conflict with another relationship that wishes to sync
+ # an entirely different column to the same target. This is a
+ # very rare edge case so we will try to minimize the memory/overhead
+ # impact of this check
+ for from_, to_ in [
+ (from_, to_) for (from_, to_) in self.synchronize_pairs
+ ] + [
+ (from_, to_) for (from_, to_) in self.secondary_synchronize_pairs
+ ]:
+ # save ourselves a ton of memory and overhead by only
+            # considering columns that are subject to overlapping
+ # FK constraints at the core level. This condition can arise
+ # if multiple relationships overlap foreign() directly, but
+ # we're going to assume it's typically a ForeignKeyConstraint-
+ # level configuration that benefits from this warning.
+
+ if to_ not in self._track_overlapping_sync_targets:
+ self._track_overlapping_sync_targets[
+ to_
+ ] = weakref.WeakKeyDictionary({self.prop: from_})
+ else:
+ other_props = []
+ prop_to_from = self._track_overlapping_sync_targets[to_]
+
+ for pr, fr_ in prop_to_from.items():
+ if (
+ not pr.mapper._dispose_called
+ and pr not in self.prop._reverse_property
+ and pr.key not in self.prop._overlaps
+ and self.prop.key not in pr._overlaps
+ # note: the "__*" symbol is used internally by
+ # SQLAlchemy as a general means of suppressing the
+ # overlaps warning for some extension cases, however
+ # this is not currently
+ # a publicly supported symbol and may change at
+ # any time.
+ and "__*" not in self.prop._overlaps
+ and "__*" not in pr._overlaps
+ and not self.prop.parent.is_sibling(pr.parent)
+ and not self.prop.mapper.is_sibling(pr.mapper)
+ and not self.prop.parent.is_sibling(pr.mapper)
+ and not self.prop.mapper.is_sibling(pr.parent)
+ and (
+ self.prop.key != pr.key
+ or not self.prop.parent.common_parent(pr.parent)
+ )
+ ):
+
+ other_props.append((pr, fr_))
+
+ if other_props:
+ util.warn(
+ "relationship '%s' will copy column %s to column %s, "
+ "which conflicts with relationship(s): %s. "
+ "If this is not the intention, consider if these "
+ "relationships should be linked with "
+ "back_populates, or if viewonly=True should be "
+ "applied to one or more if they are read-only. "
+ "For the less common case that foreign key "
+ "constraints are partially overlapping, the "
+ "orm.foreign() "
+ "annotation can be used to isolate the columns that "
+ "should be written towards. To silence this "
+ "warning, add the parameter 'overlaps=\"%s\"' to the "
+ "'%s' relationship."
+ % (
+ self.prop,
+ from_,
+ to_,
+ ", ".join(
+ sorted(
+ "'%s' (copies %s to %s)" % (pr, fr_, to_)
+ for (pr, fr_) in other_props
+ )
+ ),
+ ",".join(sorted(pr.key for pr, fr in other_props)),
+ self.prop,
+ ),
+                    code="qzyw",
+ )
+ self._track_overlapping_sync_targets[to_][self.prop] = from_
+
+ @util.memoized_property
+ def remote_columns(self):
+ return self._gather_join_annotations("remote")
+
+ @util.memoized_property
+ def local_columns(self):
+ return self._gather_join_annotations("local")
+
+ @util.memoized_property
+ def foreign_key_columns(self):
+ return self._gather_join_annotations("foreign")
+
+ def _gather_join_annotations(self, annotation):
+ s = set(
+ self._gather_columns_with_annotation(self.primaryjoin, annotation)
+ )
+ if self.secondaryjoin is not None:
+ s.update(
+ self._gather_columns_with_annotation(
+ self.secondaryjoin, annotation
+ )
+ )
+ return {x._deannotate() for x in s}
+
+ def _gather_columns_with_annotation(self, clause, *annotation):
+ annotation = set(annotation)
+ return set(
+ [
+ col
+ for col in visitors.iterate(clause, {})
+ if annotation.issubset(col._annotations)
+ ]
+ )
+
+ def join_targets(
+ self,
+ source_selectable,
+ dest_selectable,
+ aliased,
+ single_crit=None,
+ extra_criteria=(),
+ ):
+ """Given a source and destination selectable, create a
+ join between them.
+
+ This takes into account aliasing the join clause
+ to reference the appropriate corresponding columns
+ in the target objects, as well as the extra child
+ criterion, equivalent column sets, etc.
+
+ """
+ # place a barrier on the destination such that
+ # replacement traversals won't ever dig into it.
+ # its internal structure remains fixed
+ # regardless of context.
+ dest_selectable = _shallow_annotate(
+ dest_selectable, {"no_replacement_traverse": True}
+ )
+
+ primaryjoin, secondaryjoin, secondary = (
+ self.primaryjoin,
+ self.secondaryjoin,
+ self.secondary,
+ )
+
+ # adjust the join condition for single table inheritance,
+        # in the case that the join is to a subclass;
+        # this is analogous to the
+ # "_adjust_for_single_table_inheritance()" method in Query.
+
+ if single_crit is not None:
+ if secondaryjoin is not None:
+ secondaryjoin = secondaryjoin & single_crit
+ else:
+ primaryjoin = primaryjoin & single_crit
+
+ if extra_criteria:
+ if secondaryjoin is not None:
+ secondaryjoin = secondaryjoin & sql.and_(*extra_criteria)
+ else:
+ primaryjoin = primaryjoin & sql.and_(*extra_criteria)
+
+ if aliased:
+ if secondary is not None:
+ secondary = secondary._anonymous_fromclause(flat=True)
+ primary_aliasizer = ClauseAdapter(
+ secondary, exclude_fn=_ColInAnnotations("local")
+ )
+ secondary_aliasizer = ClauseAdapter(
+ dest_selectable, equivalents=self.child_equivalents
+ ).chain(primary_aliasizer)
+ if source_selectable is not None:
+ primary_aliasizer = ClauseAdapter(
+ secondary, exclude_fn=_ColInAnnotations("local")
+ ).chain(
+ ClauseAdapter(
+ source_selectable,
+ equivalents=self.parent_equivalents,
+ )
+ )
+
+ secondaryjoin = secondary_aliasizer.traverse(secondaryjoin)
+ else:
+ primary_aliasizer = ClauseAdapter(
+ dest_selectable,
+ exclude_fn=_ColInAnnotations("local"),
+ equivalents=self.child_equivalents,
+ )
+ if source_selectable is not None:
+ primary_aliasizer.chain(
+ ClauseAdapter(
+ source_selectable,
+ exclude_fn=_ColInAnnotations("remote"),
+ equivalents=self.parent_equivalents,
+ )
+ )
+ secondary_aliasizer = None
+
+ primaryjoin = primary_aliasizer.traverse(primaryjoin)
+ target_adapter = secondary_aliasizer or primary_aliasizer
+ target_adapter.exclude_fn = None
+ else:
+ target_adapter = None
+ return (
+ primaryjoin,
+ secondaryjoin,
+ secondary,
+ target_adapter,
+ dest_selectable,
+ )
+
+ def create_lazy_clause(self, reverse_direction=False):
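+        # build the WHERE criteria for a lazy load.  e.g. (hypothetical)
+        # for a primaryjoin of users.c.id == addresses.c.user_id, the
+        # returned lazywhere is "addresses.user_id = :param_1", with
+        # bind_to_col mapping the anonymous bind name back to users.c.id
+        # so it can be populated from the parent row at load time.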
+ binds = util.column_dict()
+ equated_columns = util.column_dict()
+
+ has_secondary = self.secondaryjoin is not None
+
+ if has_secondary:
+ lookup = collections.defaultdict(list)
+ for l, r in self.local_remote_pairs:
+ lookup[l].append((l, r))
+ equated_columns[r] = l
+ elif not reverse_direction:
+ for l, r in self.local_remote_pairs:
+ equated_columns[r] = l
+ else:
+ for l, r in self.local_remote_pairs:
+ equated_columns[l] = r
+
+ def col_to_bind(col):
+
+ if (
+ (not reverse_direction and "local" in col._annotations)
+ or reverse_direction
+ and (
+ (has_secondary and col in lookup)
+ or (not has_secondary and "remote" in col._annotations)
+ )
+ ):
+ if col not in binds:
+ binds[col] = sql.bindparam(
+ None, None, type_=col.type, unique=True
+ )
+ return binds[col]
+ return None
+
+ lazywhere = self.primaryjoin
+ if self.secondaryjoin is None or not reverse_direction:
+ lazywhere = visitors.replacement_traverse(
+ lazywhere, {}, col_to_bind
+ )
+
+ if self.secondaryjoin is not None:
+ secondaryjoin = self.secondaryjoin
+ if reverse_direction:
+ secondaryjoin = visitors.replacement_traverse(
+ secondaryjoin, {}, col_to_bind
+ )
+ lazywhere = sql.and_(lazywhere, secondaryjoin)
+
+ bind_to_col = {binds[col].key: col for col in binds}
+
+ return lazywhere, bind_to_col, equated_columns
+
+
+class _ColInAnnotations(object):
+ """Serializable object that tests for a name in c._annotations."""
+
+ __slots__ = ("name",)
+
+ def __init__(self, name):
+ self.name = name
+
+ def __call__(self, c):
+ return self.name in c._annotations
diff --git a/lib/sqlalchemy/orm/scoping.py b/lib/sqlalchemy/orm/scoping.py
new file mode 100644
index 0000000..f323233
--- /dev/null
+++ b/lib/sqlalchemy/orm/scoping.py
@@ -0,0 +1,228 @@
+# orm/scoping.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+from . import class_mapper
+from . import exc as orm_exc
+from .session import Session
+from .. import exc as sa_exc
+from ..util import create_proxy_methods
+from ..util import ScopedRegistry
+from ..util import ThreadLocalRegistry
+from ..util import warn
+from ..util import warn_deprecated
+
+__all__ = ["scoped_session", "ScopedSessionMixin"]
+
+
+class ScopedSessionMixin(object):
+ @property
+ def _proxied(self):
+ return self.registry()
+
+ def __call__(self, **kw):
+ r"""Return the current :class:`.Session`, creating it
+ using the :attr:`.scoped_session.session_factory` if not present.
+
+ :param \**kw: Keyword arguments will be passed to the
+ :attr:`.scoped_session.session_factory` callable, if an existing
+ :class:`.Session` is not present. If the :class:`.Session` is present
+ and keyword arguments have been passed,
+ :exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
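+
+        E.g., an illustrative sketch, assuming ``some_engine`` is an
+        existing :class:`_engine.Engine`::
+
+            Session = scoped_session(sessionmaker(bind=some_engine))
+
+            session = Session()     # creates the scope-local Session
+            session2 = Session()    # returns the same Session
+            assert session is session2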
+
+ """
+ if kw:
+ if self.registry.has():
+ raise sa_exc.InvalidRequestError(
+ "Scoped session is already present; "
+ "no new arguments may be specified."
+ )
+ else:
+ sess = self.session_factory(**kw)
+ self.registry.set(sess)
+ else:
+ sess = self.registry()
+ if not self._support_async and sess._is_asyncio:
+ warn_deprecated(
+ "Using `scoped_session` with asyncio is deprecated and "
+ "will raise an error in a future version. "
+ "Please use `async_scoped_session` instead.",
+ "1.4.23",
+ )
+ return sess
+
+ def configure(self, **kwargs):
+ """reconfigure the :class:`.sessionmaker` used by this
+ :class:`.scoped_session`.
+
+ See :meth:`.sessionmaker.configure`.
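+
+        e.g., a sketch where the factory is given a bind after the
+        fact (``create_engine`` imported from ``sqlalchemy``)::
+
+            Session = scoped_session(sessionmaker())
+
+            # ... later, once an engine is available
+            Session.configure(bind=create_engine('sqlite://'))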
+
+ """
+
+ if self.registry.has():
+ warn(
+ "At least one scoped session is already present. "
+ " configure() can not affect sessions that have "
+ "already been created."
+ )
+
+ self.session_factory.configure(**kwargs)
+
+
+@create_proxy_methods(
+ Session,
+ ":class:`_orm.Session`",
+ ":class:`_orm.scoping.scoped_session`",
+ classmethods=["close_all", "object_session", "identity_key"],
+ methods=[
+ "__contains__",
+ "__iter__",
+ "add",
+ "add_all",
+ "begin",
+ "begin_nested",
+ "close",
+ "commit",
+ "connection",
+ "delete",
+ "execute",
+ "expire",
+ "expire_all",
+ "expunge",
+ "expunge_all",
+ "flush",
+ "get",
+ "get_bind",
+ "is_modified",
+ "bulk_save_objects",
+ "bulk_insert_mappings",
+ "bulk_update_mappings",
+ "merge",
+ "query",
+ "refresh",
+ "rollback",
+ "scalar",
+ "scalars",
+ ],
+ attributes=[
+ "bind",
+ "dirty",
+ "deleted",
+ "new",
+ "identity_map",
+ "is_active",
+ "autoflush",
+ "no_autoflush",
+ "info",
+ "autocommit",
+ ],
+)
+class scoped_session(ScopedSessionMixin):
+ """Provides scoped management of :class:`.Session` objects.
+
+ See :ref:`unitofwork_contextual` for a tutorial.
+
+ .. note::
+
+ When using :ref:`asyncio_toplevel`, the async-compatible
+ :class:`_asyncio.async_scoped_session` class should be
+ used in place of :class:`.scoped_session`.
+
+ """
+
+ _support_async = False
+
+ session_factory = None
+ """The `session_factory` provided to `__init__` is stored in this
+ attribute and may be accessed at a later time. This can be useful when
+ a new non-scoped :class:`.Session` or :class:`_engine.Connection` to the
+ database is needed."""
+
+ def __init__(self, session_factory, scopefunc=None):
+ """Construct a new :class:`.scoped_session`.
+
+ :param session_factory: a factory to create new :class:`.Session`
+ instances. This is usually, but not necessarily, an instance
+ of :class:`.sessionmaker`.
+ :param scopefunc: optional function which defines
+ the current scope. If not passed, the :class:`.scoped_session`
+ object assumes "thread-local" scope, and will use
+ a Python ``threading.local()`` in order to maintain the current
+ :class:`.Session`. If passed, the function should return
+ a hashable token; this token will be used as the key in a
+ dictionary in order to store and retrieve the current
+ :class:`.Session`.
+
+ """
+ self.session_factory = session_factory
+
+ if scopefunc:
+ self.registry = ScopedRegistry(session_factory, scopefunc)
+ else:
+ self.registry = ThreadLocalRegistry(session_factory)
+
+ def remove(self):
+ """Dispose of the current :class:`.Session`, if present.
+
+ This will first call :meth:`.Session.close` method
+ on the current :class:`.Session`, which releases any existing
+ transactional/connection resources still being held; transactions
+ specifically are rolled back. The :class:`.Session` is then
+ discarded. Upon next usage within the same scope,
+ the :class:`.scoped_session` will produce a new
+ :class:`.Session` object.
+
+ """
+
+ if self.registry.has():
+ self.registry().close()
+ self.registry.clear()
+
+ def query_property(self, query_cls=None):
+ """return a class property which produces a :class:`_query.Query`
+ object
+ against the class and the current :class:`.Session` when called.
+
+ e.g.::
+
+ Session = scoped_session(sessionmaker())
+
+ class MyClass(object):
+ query = Session.query_property()
+
+ # after mappers are defined
+ result = MyClass.query.filter(MyClass.name=='foo').all()
+
+ Produces instances of the session's configured query class by
+ default. To override and use a custom implementation, provide
+ a ``query_cls`` callable. The callable will be invoked with
+ the class's mapper as a positional argument and a session
+ keyword argument.
+
+ There is no limit to the number of query properties placed on
+ a class.
+
+ """
+
+ class query(object):
+ def __get__(s, instance, owner):
+ try:
+ mapper = class_mapper(owner)
+ if mapper:
+ if query_cls:
+ # custom query class
+ return query_cls(mapper, session=self.registry())
+ else:
+ # session's configured query class
+ return self.registry().query(mapper)
+ except orm_exc.UnmappedClassError:
+ return None
+
+ return query()
+
+
+ScopedSession = scoped_session
+"""Old name for backwards compatibility."""
diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py
new file mode 100644
index 0000000..c6a9169
--- /dev/null
+++ b/lib/sqlalchemy/orm/session.py
@@ -0,0 +1,4386 @@
+# orm/session.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+"""Provides the Session class and related utilities."""
+
+
+import itertools
+import sys
+import weakref
+
+from . import attributes
+from . import context
+from . import exc
+from . import identity
+from . import loading
+from . import persistence
+from . import query
+from . import state as statelib
+from .base import _class_to_mapper
+from .base import _none_set
+from .base import _state_mapper
+from .base import instance_str
+from .base import object_mapper
+from .base import object_state
+from .base import state_str
+from .unitofwork import UOWTransaction
+from .. import engine
+from .. import exc as sa_exc
+from .. import sql
+from .. import util
+from ..engine.util import TransactionalContext
+from ..inspection import inspect
+from ..sql import coercions
+from ..sql import dml
+from ..sql import roles
+from ..sql import visitors
+from ..sql.base import CompileState
+from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
+
+__all__ = [
+ "Session",
+ "SessionTransaction",
+ "sessionmaker",
+ "ORMExecuteState",
+ "close_all_sessions",
+ "make_transient",
+ "make_transient_to_detached",
+ "object_session",
+]
+
+_sessions = weakref.WeakValueDictionary()
+"""Weak-referencing dictionary of :class:`.Session` objects.
+"""
+
+statelib._sessions = _sessions
+
+
+def _state_session(state):
+ """Given an :class:`.InstanceState`, return the :class:`.Session`
+ associated, if any.
+ """
+ return state.session
+
+
+class _SessionClassMethods(object):
+ """Class-level methods for :class:`.Session`, :class:`.sessionmaker`."""
+
+ @classmethod
+ @util.deprecated(
+ "1.3",
+ "The :meth:`.Session.close_all` method is deprecated and will be "
+ "removed in a future release. Please refer to "
+ ":func:`.session.close_all_sessions`.",
+ )
+ def close_all(cls):
+ """Close *all* sessions in memory."""
+
+ close_all_sessions()
+
+ @classmethod
+ @util.preload_module("sqlalchemy.orm.util")
+ def identity_key(cls, *args, **kwargs):
+ """Return an identity key.
+
+ This is an alias of :func:`.util.identity_key`.
+
+ """
+ return util.preloaded.orm_util.identity_key(*args, **kwargs)
+
+ @classmethod
+ def object_session(cls, instance):
+ """Return the :class:`.Session` to which an object belongs.
+
+ This is an alias of :func:`.object_session`.
+
+ """
+
+ return object_session(instance)
+
+
+ACTIVE = util.symbol("ACTIVE")
+PREPARED = util.symbol("PREPARED")
+COMMITTED = util.symbol("COMMITTED")
+DEACTIVE = util.symbol("DEACTIVE")
+CLOSED = util.symbol("CLOSED")
+
+
+class ORMExecuteState(util.MemoizedSlots):
+ """Represents a call to the :meth:`_orm.Session.execute` method, as passed
+ to the :meth:`.SessionEvents.do_orm_execute` event hook.
+
+ .. versionadded:: 1.4
+
+ .. seealso::
+
+ :ref:`session_execute_events` - top level documentation on how
+ to use :meth:`_orm.SessionEvents.do_orm_execute`
+
+ """
+
+ __slots__ = (
+ "session",
+ "statement",
+ "parameters",
+ "execution_options",
+ "local_execution_options",
+ "bind_arguments",
+ "_compile_state_cls",
+ "_starting_event_idx",
+ "_events_todo",
+ "_update_execution_options",
+ )
+
+ def __init__(
+ self,
+ session,
+ statement,
+ parameters,
+ execution_options,
+ bind_arguments,
+ compile_state_cls,
+ events_todo,
+ ):
+ self.session = session
+ self.statement = statement
+ self.parameters = parameters
+ self.local_execution_options = execution_options
+ self.execution_options = statement._execution_options.union(
+ execution_options
+ )
+ self.bind_arguments = bind_arguments
+ self._compile_state_cls = compile_state_cls
+ self._events_todo = list(events_todo)
+
+ def _remaining_events(self):
+ return self._events_todo[self._starting_event_idx + 1 :]
+
+ def invoke_statement(
+ self,
+ statement=None,
+ params=None,
+ execution_options=None,
+ bind_arguments=None,
+ ):
+ """Execute the statement represented by this
+ :class:`.ORMExecuteState`, without re-invoking events that have
+ already proceeded.
+
+ This method essentially performs a re-entrant execution of the current
+ statement for which the :meth:`.SessionEvents.do_orm_execute` event is
+ being currently invoked. The use case for this is for event handlers
+ that want to override how the ultimate
+ :class:`_engine.Result` object is returned, such as for schemes that
+ retrieve results from an offline cache or which concatenate results
+ from multiple executions.
+
+ When the :class:`_engine.Result` object is returned by the actual
+ handler function within :meth:`_orm.SessionEvents.do_orm_execute` and
+ is propagated to the calling
+ :meth:`_orm.Session.execute` method, the remainder of the
+ :meth:`_orm.Session.execute` method is preempted and the
+ :class:`_engine.Result` object is returned to the caller of
+ :meth:`_orm.Session.execute` immediately.
+
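+        For example, a minimal caching sketch; ``cache`` is an assumed
+        application-level dictionary and ``"cache_key"`` an assumed
+        execution option, neither being part of the API::
+
+            from sqlalchemy import event
+
+            @event.listens_for(Session, "do_orm_execute")
+            def _do_orm_execute(orm_execute_state):
+                key = orm_execute_state.execution_options.get("cache_key")
+                if key is not None:
+                    if key not in cache:
+                        # invoke the statement once, freezing the result
+                        cache[key] = (
+                            orm_execute_state.invoke_statement().freeze()
+                        )
+                    # calling a FrozenResult delivers a new Result
+                    return cache[key]()
+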
+ :param statement: optional statement to be invoked, in place of the
+ statement currently represented by :attr:`.ORMExecuteState.statement`.
+
+ :param params: optional dictionary of parameters which will be merged
+ into the existing :attr:`.ORMExecuteState.parameters` of this
+ :class:`.ORMExecuteState`.
+
+ :param execution_options: optional dictionary of execution options
+         which will be merged into the existing
+ :attr:`.ORMExecuteState.execution_options` of this
+ :class:`.ORMExecuteState`.
+
+ :param bind_arguments: optional dictionary of bind_arguments
+         which will be merged with the current
+ :attr:`.ORMExecuteState.bind_arguments`
+ of this :class:`.ORMExecuteState`.
+
+ :return: a :class:`_engine.Result` object with ORM-level results.
+
+ .. seealso::
+
+ :ref:`do_orm_execute_re_executing` - background and examples on the
+ appropriate usage of :meth:`_orm.ORMExecuteState.invoke_statement`.
+
+
+ """
+
+ if statement is None:
+ statement = self.statement
+
+ _bind_arguments = dict(self.bind_arguments)
+ if bind_arguments:
+ _bind_arguments.update(bind_arguments)
+ _bind_arguments["_sa_skip_events"] = True
+
+ if params:
+ _params = dict(self.parameters)
+ _params.update(params)
+ else:
+ _params = self.parameters
+
+ _execution_options = self.local_execution_options
+ if execution_options:
+ _execution_options = _execution_options.union(execution_options)
+
+ return self.session.execute(
+ statement,
+ _params,
+ _execution_options,
+ _bind_arguments,
+ _parent_execute_state=self,
+ )
+
+ @property
+ def bind_mapper(self):
+ """Return the :class:`_orm.Mapper` that is the primary "bind" mapper.
+
+ For an :class:`_orm.ORMExecuteState` object invoking an ORM
+ statement, that is, the :attr:`_orm.ORMExecuteState.is_orm_statement`
+ attribute is ``True``, this attribute will return the
+ :class:`_orm.Mapper` that is considered to be the "primary" mapper
+ of the statement. The term "bind mapper" refers to the fact that
+ a :class:`_orm.Session` object may be "bound" to multiple
+ :class:`_engine.Engine` objects keyed to mapped classes, and the
+ "bind mapper" determines which of those :class:`_engine.Engine` objects
+ would be selected.
+
+ For a statement that is invoked against a single mapped class,
+ :attr:`_orm.ORMExecuteState.bind_mapper` is intended to be a reliable
+ way of getting this mapper.
+
+ .. versionadded:: 1.4.0b2
+
+ .. seealso::
+
+ :attr:`_orm.ORMExecuteState.all_mappers`
+
+
+ """
+ return self.bind_arguments.get("mapper", None)
+
+ @property
+ def all_mappers(self):
+ """Return a sequence of all :class:`_orm.Mapper` objects that are
+ involved at the top level of this statement.
+
+ By "top level" we mean those :class:`_orm.Mapper` objects that would
+ be represented in the result set rows for a :func:`_sql.select`
+        query, or, for a :func:`_dml.update` or :func:`_dml.delete` query,
+ the mapper that is the main subject of the UPDATE or DELETE.
+
+ .. versionadded:: 1.4.0b2
+
+ .. seealso::
+
+ :attr:`_orm.ORMExecuteState.bind_mapper`
+
+ """
+ if not self.is_orm_statement:
+ return []
+ elif self.is_select:
+ result = []
+ seen = set()
+ for d in self.statement.column_descriptions:
+ ent = d["entity"]
+ if ent:
+ insp = inspect(ent, raiseerr=False)
+ if insp and insp.mapper and insp.mapper not in seen:
+ seen.add(insp.mapper)
+ result.append(insp.mapper)
+ return result
+ elif self.is_update or self.is_delete:
+ return [self.bind_mapper]
+ else:
+ return []
+
+ @property
+ def is_orm_statement(self):
+ """return True if the operation is an ORM statement.
+
+ This indicates that the select(), update(), or delete() being
+ invoked contains ORM entities as subjects. For a statement
+ that does not have ORM entities and instead refers only to
+ :class:`.Table` metadata, it is invoked as a Core SQL statement
+ and no ORM-level automation takes place.
+
+ """
+ return self._compile_state_cls is not None
+
+ @property
+ def is_select(self):
+ """return True if this is a SELECT operation."""
+ return self.statement.is_select
+
+ @property
+ def is_insert(self):
+ """return True if this is an INSERT operation."""
+ return self.statement.is_dml and self.statement.is_insert
+
+ @property
+ def is_update(self):
+ """return True if this is an UPDATE operation."""
+ return self.statement.is_dml and self.statement.is_update
+
+ @property
+ def is_delete(self):
+ """return True if this is a DELETE operation."""
+ return self.statement.is_dml and self.statement.is_delete
+
+ @property
+ def _is_crud(self):
+ return isinstance(self.statement, (dml.Update, dml.Delete))
+
+ def update_execution_options(self, **opts):
+ # TODO: no coverage
+ self.local_execution_options = self.local_execution_options.union(opts)
+
+ def _orm_compile_options(self):
+ if not self.is_select:
+ return None
+ opts = self.statement._compile_options
+ if opts.isinstance(context.ORMCompileState.default_compile_options):
+ return opts
+ else:
+ return None
+
+ @property
+ def lazy_loaded_from(self):
+ """An :class:`.InstanceState` that is using this statement execution
+ for a lazy load operation.
+
+ The primary rationale for this attribute is to support the horizontal
+ sharding extension, where it is available within specific query
+ execution time hooks created by this extension. To that end, the
+ attribute is only intended to be meaningful at **query execution
+ time**, and importantly not any time prior to that, including query
+ compilation time.
+
+ """
+ return self.load_options._lazy_loaded_from
+
+ @property
+ def loader_strategy_path(self):
+ """Return the :class:`.PathRegistry` for the current load path.
+
+ This object represents the "path" in a query along relationships
+ when a particular object or collection is being loaded.
+
+ """
+ opts = self._orm_compile_options()
+ if opts is not None:
+ return opts._current_path
+ else:
+ return None
+
+ @property
+ def is_column_load(self):
+ """Return True if the operation is refreshing column-oriented
+ attributes on an existing ORM object.
+
+ This occurs during operations such as :meth:`_orm.Session.refresh`,
+ as well as when an attribute deferred by :func:`_orm.defer` is
+ being loaded, or an attribute that was expired either directly
+ by :meth:`_orm.Session.expire` or via a commit operation is being
+ loaded.
+
+ Handlers will very likely not want to add any options to queries
+ when such an operation is occurring as the query should be a straight
+ primary key fetch which should not have any additional WHERE criteria,
+ and loader options travelling with the instance
+ will have already been added to the query.
+
+ .. versionadded:: 1.4.0b2
+
+ .. seealso::
+
+ :attr:`_orm.ORMExecuteState.is_relationship_load`
+
+ """
+ opts = self._orm_compile_options()
+ return opts is not None and opts._for_refresh_state
+
+ @property
+ def is_relationship_load(self):
+ """Return True if this load is loading objects on behalf of a
+ relationship.
+
+ This means, the loader in effect is either a LazyLoader,
+ SelectInLoader, SubqueryLoader, or similar, and the entire
+ SELECT statement being emitted is on behalf of a relationship
+ load.
+
+ Handlers will very likely not want to add any options to queries
+ when such an operation is occurring, as loader options are already
+ capable of being propagated to relationship loaders and should
+ be already present.
+
+ .. seealso::
+
+ :attr:`_orm.ORMExecuteState.is_column_load`
+
+ """
+ opts = self._orm_compile_options()
+ if opts is None:
+ return False
+ path = self.loader_strategy_path
+ return path is not None and not path.is_root
+
+ @property
+ def load_options(self):
+ """Return the load_options that will be used for this execution."""
+
+ if not self.is_select:
+ raise sa_exc.InvalidRequestError(
+ "This ORM execution is not against a SELECT statement "
+ "so there are no load options."
+ )
+ return self.execution_options.get(
+ "_sa_orm_load_options", context.QueryContext.default_load_options
+ )
+
+ @property
+ def update_delete_options(self):
+ """Return the update_delete_options that will be used for this
+ execution."""
+
+ if not self._is_crud:
+ raise sa_exc.InvalidRequestError(
+ "This ORM execution is not against an UPDATE or DELETE "
+ "statement so there are no update options."
+ )
+ return self.execution_options.get(
+ "_sa_orm_update_options",
+ persistence.BulkUDCompileState.default_update_options,
+ )
+
+ @property
+ def user_defined_options(self):
+ """The sequence of :class:`.UserDefinedOptions` that have been
+ associated with the statement being invoked.
+
+ """
+ return [
+ opt
+ for opt in self.statement._with_options
+ if not opt._is_compile_state and not opt._is_legacy_option
+ ]
+
+
+class SessionTransaction(TransactionalContext):
+ """A :class:`.Session`-level transaction.
+
+ :class:`.SessionTransaction` is produced from the
+ :meth:`_orm.Session.begin`
+ and :meth:`_orm.Session.begin_nested` methods. It's largely an internal
+ object that in modern use provides a context manager for session
+ transactions.
+
+ Documentation on interacting with :class:`_orm.SessionTransaction` is
+ at: :ref:`unitofwork_transaction`.
+
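+    E.g., as a context manager, where ``session`` is an existing
+    :class:`_orm.Session` and ``some_object`` is a mapped instance::
+
+        with session.begin():
+            session.add(some_object)
+
+    The transaction is committed when the block completes normally,
+    or rolled back if an exception is raised.
+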
+
+ .. versionchanged:: 1.4 The scoping and API methods to work with the
+ :class:`_orm.SessionTransaction` object directly have been simplified.
+
+ .. seealso::
+
+ :ref:`unitofwork_transaction`
+
+ :meth:`.Session.begin`
+
+ :meth:`.Session.begin_nested`
+
+ :meth:`.Session.rollback`
+
+ :meth:`.Session.commit`
+
+ :meth:`.Session.in_transaction`
+
+ :meth:`.Session.in_nested_transaction`
+
+ :meth:`.Session.get_transaction`
+
+ :meth:`.Session.get_nested_transaction`
+
+
+ """
+
+ _rollback_exception = None
+
+ def __init__(
+ self,
+ session,
+ parent=None,
+ nested=False,
+ autobegin=False,
+ ):
+ TransactionalContext._trans_ctx_check(session)
+
+ self.session = session
+ self._connections = {}
+ self._parent = parent
+ self.nested = nested
+ if nested:
+ self._previous_nested_transaction = session._nested_transaction
+ self._state = ACTIVE
+ if not parent and nested:
+ raise sa_exc.InvalidRequestError(
+ "Can't start a SAVEPOINT transaction when no existing "
+ "transaction is in progress"
+ )
+
+ self._take_snapshot(autobegin=autobegin)
+
+ # make sure transaction is assigned before we call the
+ # dispatch
+ self.session._transaction = self
+
+ self.session.dispatch.after_transaction_create(self.session, self)
+
+ @property
+ def parent(self):
+ """The parent :class:`.SessionTransaction` of this
+ :class:`.SessionTransaction`.
+
+ If this attribute is ``None``, indicates this
+ :class:`.SessionTransaction` is at the top of the stack, and
+ corresponds to a real "COMMIT"/"ROLLBACK"
+ block. If non-``None``, then this is either a "subtransaction"
+ or a "nested" / SAVEPOINT transaction. If the
+ :attr:`.SessionTransaction.nested` attribute is ``True``, then
+        this is a SAVEPOINT, and if ``False``, indicates this is a
+        subtransaction.
+
+ .. versionadded:: 1.0.16 - use ._parent for previous versions
+
+ """
+ return self._parent
+
+ nested = False
+ """Indicates if this is a nested, or SAVEPOINT, transaction.
+
+ When :attr:`.SessionTransaction.nested` is True, it is expected
+    that :attr:`.SessionTransaction.parent` will be present as well.
+
+ """
+
+ @property
+ def is_active(self):
+ return self.session is not None and self._state is ACTIVE
+
+ def _assert_active(
+ self,
+ prepared_ok=False,
+ rollback_ok=False,
+ deactive_ok=False,
+ closed_msg="This transaction is closed",
+ ):
+ if self._state is COMMITTED:
+ raise sa_exc.InvalidRequestError(
+ "This session is in 'committed' state; no further "
+ "SQL can be emitted within this transaction."
+ )
+ elif self._state is PREPARED:
+ if not prepared_ok:
+ raise sa_exc.InvalidRequestError(
+ "This session is in 'prepared' state; no further "
+ "SQL can be emitted within this transaction."
+ )
+ elif self._state is DEACTIVE:
+ if not deactive_ok and not rollback_ok:
+ if self._rollback_exception:
+ raise sa_exc.PendingRollbackError(
+ "This Session's transaction has been rolled back "
+ "due to a previous exception during flush."
+ " To begin a new transaction with this Session, "
+ "first issue Session.rollback()."
+ " Original exception was: %s"
+ % self._rollback_exception,
+ code="7s2a",
+ )
+ elif not deactive_ok:
+ raise sa_exc.InvalidRequestError(
+ "This session is in 'inactive' state, due to the "
+ "SQL transaction being rolled back; no further "
+ "SQL can be emitted within this transaction."
+ )
+ elif self._state is CLOSED:
+ raise sa_exc.ResourceClosedError(closed_msg)
+
+ @property
+ def _is_transaction_boundary(self):
+ return self.nested or not self._parent
+
+ def connection(self, bindkey, execution_options=None, **kwargs):
+ self._assert_active()
+ bind = self.session.get_bind(bindkey, **kwargs)
+ return self._connection_for_bind(bind, execution_options)
+
+ def _begin(self, nested=False):
+ self._assert_active()
+ return SessionTransaction(self.session, self, nested=nested)
+
+ def _iterate_self_and_parents(self, upto=None):
+
+ current = self
+ result = ()
+ while current:
+ result += (current,)
+ if current._parent is upto:
+ break
+ elif current._parent is None:
+ raise sa_exc.InvalidRequestError(
+ "Transaction %s is not on the active transaction list"
+ % (upto)
+ )
+ else:
+ current = current._parent
+
+ return result
+
+ def _take_snapshot(self, autobegin=False):
+ if not self._is_transaction_boundary:
+ self._new = self._parent._new
+ self._deleted = self._parent._deleted
+ self._dirty = self._parent._dirty
+ self._key_switches = self._parent._key_switches
+ return
+
+ if not autobegin and not self.session._flushing:
+ self.session.flush()
+
+ self._new = weakref.WeakKeyDictionary()
+ self._deleted = weakref.WeakKeyDictionary()
+ self._dirty = weakref.WeakKeyDictionary()
+ self._key_switches = weakref.WeakKeyDictionary()
+
+ def _restore_snapshot(self, dirty_only=False):
+ """Restore the restoration state taken before a transaction began.
+
+ Corresponds to a rollback.
+
+ """
+ assert self._is_transaction_boundary
+
+ to_expunge = set(self._new).union(self.session._new)
+ self.session._expunge_states(to_expunge, to_transient=True)
+
+ for s, (oldkey, newkey) in self._key_switches.items():
+ # we probably can do this conditionally based on
+ # if we expunged or not, but safe_discard does that anyway
+ self.session.identity_map.safe_discard(s)
+
+ # restore the old key
+ s.key = oldkey
+
+ # now restore the object, but only if we didn't expunge
+ if s not in to_expunge:
+ self.session.identity_map.replace(s)
+
+ for s in set(self._deleted).union(self.session._deleted):
+ self.session._update_impl(s, revert_deletion=True)
+
+ assert not self.session._deleted
+
+ for s in self.session.identity_map.all_states():
+ if not dirty_only or s.modified or s in self._dirty:
+ s._expire(s.dict, self.session.identity_map._modified)
+
+ def _remove_snapshot(self):
+ """Remove the restoration state taken before a transaction began.
+
+ Corresponds to a commit.
+
+ """
+ assert self._is_transaction_boundary
+
+ if not self.nested and self.session.expire_on_commit:
+ for s in self.session.identity_map.all_states():
+ s._expire(s.dict, self.session.identity_map._modified)
+
+ statelib.InstanceState._detach_states(
+ list(self._deleted), self.session
+ )
+ self._deleted.clear()
+ elif self.nested:
+ self._parent._new.update(self._new)
+ self._parent._dirty.update(self._dirty)
+ self._parent._deleted.update(self._deleted)
+ self._parent._key_switches.update(self._key_switches)
+
+ def _connection_for_bind(self, bind, execution_options):
+ self._assert_active()
+
+ if bind in self._connections:
+ if execution_options:
+ util.warn(
+ "Connection is already established for the "
+ "given bind; execution_options ignored"
+ )
+ return self._connections[bind][0]
+
+ local_connect = False
+ should_commit = True
+
+ if self._parent:
+ conn = self._parent._connection_for_bind(bind, execution_options)
+ if not self.nested:
+ return conn
+ else:
+ if isinstance(bind, engine.Connection):
+ conn = bind
+ if conn.engine in self._connections:
+ raise sa_exc.InvalidRequestError(
+ "Session already has a Connection associated for the "
+ "given Connection's Engine"
+ )
+ else:
+ conn = bind.connect()
+ local_connect = True
+
+ try:
+ if execution_options:
+ conn = conn.execution_options(**execution_options)
+
+ if self.session.twophase and self._parent is None:
+ transaction = conn.begin_twophase()
+ elif self.nested:
+ transaction = conn.begin_nested()
+ elif conn.in_transaction():
+ # if given a future connection already in a transaction, don't
+ # commit that transaction unless it is a savepoint
+ if conn.in_nested_transaction():
+ transaction = conn.get_nested_transaction()
+ else:
+ transaction = conn.get_transaction()
+ should_commit = False
+ else:
+ transaction = conn.begin()
+ except:
+            # connection will not be associated with this Session;
+ # close it immediately so that it isn't closed under GC
+ if local_connect:
+ conn.close()
+ raise
+ else:
+ bind_is_connection = isinstance(bind, engine.Connection)
+
+ self._connections[conn] = self._connections[conn.engine] = (
+ conn,
+ transaction,
+ should_commit,
+ not bind_is_connection,
+ )
+ self.session.dispatch.after_begin(self.session, self, conn)
+ return conn
+
+ def prepare(self):
+ if self._parent is not None or not self.session.twophase:
+ raise sa_exc.InvalidRequestError(
+ "'twophase' mode not enabled, or not root transaction; "
+ "can't prepare."
+ )
+ self._prepare_impl()
+
+ def _prepare_impl(self):
+ self._assert_active()
+ if self._parent is None or self.nested:
+ self.session.dispatch.before_commit(self.session)
+
+ stx = self.session._transaction
+ if stx is not self:
+ for subtransaction in stx._iterate_self_and_parents(upto=self):
+ subtransaction.commit()
+
+ if not self.session._flushing:
+ for _flush_guard in range(100):
+ if self.session._is_clean():
+ break
+ self.session.flush()
+ else:
+ raise exc.FlushError(
+ "Over 100 subsequent flushes have occurred within "
+ "session.commit() - is an after_flush() hook "
+ "creating new objects?"
+ )
+
+ if self._parent is None and self.session.twophase:
+ try:
+ for t in set(self._connections.values()):
+ t[1].prepare()
+ except:
+ with util.safe_reraise():
+ self.rollback()
+
+ self._state = PREPARED
+
+ def commit(self, _to_root=False):
+ self._assert_active(prepared_ok=True)
+ if self._state is not PREPARED:
+ self._prepare_impl()
+
+ if self._parent is None or self.nested:
+ for conn, trans, should_commit, autoclose in set(
+ self._connections.values()
+ ):
+ if should_commit:
+ trans.commit()
+
+ self._state = COMMITTED
+ self.session.dispatch.after_commit(self.session)
+
+ self._remove_snapshot()
+
+ self.close()
+
+ if _to_root and self._parent:
+ return self._parent.commit(_to_root=True)
+
+ return self._parent
+
+ def rollback(self, _capture_exception=False, _to_root=False):
+ self._assert_active(prepared_ok=True, rollback_ok=True)
+
+ stx = self.session._transaction
+ if stx is not self:
+ for subtransaction in stx._iterate_self_and_parents(upto=self):
+ subtransaction.close()
+
+ boundary = self
+ rollback_err = None
+ if self._state in (ACTIVE, PREPARED):
+ for transaction in self._iterate_self_and_parents():
+ if transaction._parent is None or transaction.nested:
+ try:
+ for t in set(transaction._connections.values()):
+ t[1].rollback()
+
+ transaction._state = DEACTIVE
+ self.session.dispatch.after_rollback(self.session)
+ except:
+ rollback_err = sys.exc_info()
+ finally:
+ transaction._state = DEACTIVE
+ transaction._restore_snapshot(
+ dirty_only=transaction.nested
+ )
+ boundary = transaction
+ break
+ else:
+ transaction._state = DEACTIVE
+
+ sess = self.session
+
+ if not rollback_err and not sess._is_clean():
+
+ # if items were added, deleted, or mutated
+ # here, we need to re-restore the snapshot
+ util.warn(
+ "Session's state has been changed on "
+ "a non-active transaction - this state "
+ "will be discarded."
+ )
+ boundary._restore_snapshot(dirty_only=boundary.nested)
+
+ self.close()
+
+ if self._parent and _capture_exception:
+ self._parent._rollback_exception = sys.exc_info()[1]
+
+ if rollback_err:
+ util.raise_(rollback_err[1], with_traceback=rollback_err[2])
+
+ sess.dispatch.after_soft_rollback(sess, self)
+
+ if _to_root and self._parent:
+ return self._parent.rollback(_to_root=True)
+ return self._parent
+
+ def close(self, invalidate=False):
+ if self.nested:
+ self.session._nested_transaction = (
+ self._previous_nested_transaction
+ )
+
+ self.session._transaction = self._parent
+
+ if self._parent is None:
+ for connection, transaction, should_commit, autoclose in set(
+ self._connections.values()
+ ):
+ if invalidate:
+ connection.invalidate()
+ if should_commit and transaction.is_active:
+ transaction.close()
+ if autoclose:
+ connection.close()
+
+ self._state = CLOSED
+ self.session.dispatch.after_transaction_end(self.session, self)
+
+ self.session = None
+ self._connections = None
+
+ def _get_subject(self):
+ return self.session
+
+ def _transaction_is_active(self):
+ return self._state is ACTIVE
+
+ def _transaction_is_closed(self):
+ return self._state is CLOSED
+
+ def _rollback_can_be_called(self):
+ return self._state not in (COMMITTED, CLOSED)
+
+
+class Session(_SessionClassMethods):
+ """Manages persistence operations for ORM-mapped objects.
+
+ The Session's usage paradigm is described at :doc:`/orm/session`.
+
+
+ """
+
+ _is_asyncio = False
+
+ @util.deprecated_params(
+ autocommit=(
+ "2.0",
+ "The :paramref:`.Session.autocommit` parameter is deprecated "
+ "and will be removed in SQLAlchemy version 2.0. The "
+ ':class:`_orm.Session` now features "autobegin" behavior '
+ "such that the :meth:`.Session.begin` method may be called "
+ "if a transaction has not yet been started yet. See the section "
+ ":ref:`session_explicit_begin` for background.",
+ ),
+ )
+ def __init__(
+ self,
+ bind=None,
+ autoflush=True,
+ future=False,
+ expire_on_commit=True,
+ autocommit=False,
+ twophase=False,
+ binds=None,
+ enable_baked_queries=True,
+ info=None,
+ query_cls=None,
+ ):
+ r"""Construct a new Session.
+
+ See also the :class:`.sessionmaker` function which is used to
+ generate a :class:`.Session`-producing callable with a given
+ set of arguments.
+
+ :param autocommit:
+ Defaults to ``False``. When ``True``, the
+ :class:`.Session` does not automatically begin transactions for
+ individual statement executions, will acquire connections from the
+ engine on an as-needed basis, releasing to the connection pool
+ after each statement. Flushes will begin and commit (or possibly
+ rollback) their own transaction if no transaction is present.
+ When using this mode, the
+ :meth:`.Session.begin` method may be used to explicitly start
+ transactions, but the usual "autobegin" behavior is not present.
+
+ :param autoflush: When ``True``, all query operations will issue a
+ :meth:`~.Session.flush` call to this ``Session`` before proceeding.
+ This is a convenience feature so that :meth:`~.Session.flush` need
+ not be called repeatedly in order for database queries to retrieve
+ results. It's typical that ``autoflush`` is used in conjunction
+ with ``autocommit=False``. In this scenario, explicit calls to
+ :meth:`~.Session.flush` are rarely needed; you usually only need to
+ call :meth:`~.Session.commit` (which flushes) to finalize changes.
+
+ .. seealso::
+
+ :ref:`session_flushing` - additional background on autoflush
+
+ :param bind: An optional :class:`_engine.Engine` or
+ :class:`_engine.Connection` to
+ which this ``Session`` should be bound. When specified, all SQL
+ operations performed by this session will execute via this
+ connectable.
+
+ :param binds: A dictionary which may specify any number of
+ :class:`_engine.Engine` or :class:`_engine.Connection`
+ objects as the source of
+ connectivity for SQL operations on a per-entity basis. The keys
+ of the dictionary consist of any series of mapped classes,
+ arbitrary Python classes that are bases for mapped classes,
+ :class:`_schema.Table` objects and :class:`_orm.Mapper` objects.
+ The
+ values of the dictionary are then instances of
+ :class:`_engine.Engine`
+ or less commonly :class:`_engine.Connection` objects.
+ Operations which
+ proceed relative to a particular mapped class will consult this
+ dictionary for the closest matching entity in order to determine
+ which :class:`_engine.Engine` should be used for a particular SQL
+ operation. The complete heuristics for resolution are
+ described at :meth:`.Session.get_bind`. Usage looks like::
+
+ Session = sessionmaker(binds={
+ SomeMappedClass: create_engine('postgresql://engine1'),
+ SomeDeclarativeBase: create_engine('postgresql://engine2'),
+ some_mapper: create_engine('postgresql://engine3'),
+ some_table: create_engine('postgresql://engine4'),
+ })
+
+ .. seealso::
+
+ :ref:`session_partitioning`
+
+ :meth:`.Session.bind_mapper`
+
+ :meth:`.Session.bind_table`
+
+ :meth:`.Session.get_bind`
+
+
+ :param \class_: Specify an alternate class other than
+ ``sqlalchemy.orm.session.Session`` which should be used by the
+ returned class. This is the only argument that is local to the
+ :class:`.sessionmaker` function, and is not sent directly to the
+ constructor for ``Session``.
+
+ :param enable_baked_queries: defaults to ``True``. A flag consumed
+ by the :mod:`sqlalchemy.ext.baked` extension to determine if
+ "baked queries" should be cached, as is the normal operation
+ of this extension. When set to ``False``, caching as used by
+ this particular extension is disabled.
+
+ .. versionchanged:: 1.4 The ``sqlalchemy.ext.baked`` extension is
+ legacy and is not used by any of SQLAlchemy's internals. This
+ flag therefore only affects applications that are making explicit
+ use of this extension within their own code.
+
+ :param expire_on_commit: Defaults to ``True``. When ``True``, all
+ instances will be fully expired after each :meth:`~.commit`,
+ so that all attribute/object access subsequent to a completed
+ transaction will load from the most recent database state.
+
+ .. seealso::
+
+ :ref:`session_committing`
+
+ :param future: if True, use 2.0 style transactional and engine
+ behavior. Future mode includes the following behaviors:
+
+ * The :class:`_orm.Session` will not use "bound" metadata in order
+ to locate an :class:`_engine.Engine`; the engine or engines in use
+ must be specified to the constructor of :class:`_orm.Session` or
+ otherwise be configured against the :class:`_orm.sessionmaker`
+ in use
+
+ * The "subtransactions" feature of :meth:`_orm.Session.begin` is
+ removed in version 2.0 and is disabled when the future flag is
+ set.
+
+ * The behavior of the :paramref:`_orm.relationship.cascade_backrefs`
+ flag on a :func:`_orm.relationship` will always assume
+ "False" behavior.
+
+ .. versionadded:: 1.4
+
+ .. seealso::
+
+ :ref:`migration_20_toplevel`
+
+ :param info: optional dictionary of arbitrary data to be associated
+ with this :class:`.Session`. Is available via the
+ :attr:`.Session.info` attribute. Note the dictionary is copied at
+ construction time so that modifications to the per-
+ :class:`.Session` dictionary will be local to that
+ :class:`.Session`.
+
+ :param query_cls: Class which should be used to create new Query
+ objects, as returned by the :meth:`~.Session.query` method.
+ Defaults to :class:`_query.Query`.
+
+ :param twophase: When ``True``, all transactions will be started as
+ a "two phase" transaction, i.e. using the "two phase" semantics
+ of the database in use along with an XID. During a
+ :meth:`~.commit`, after :meth:`~.flush` has been issued for all
+ attached databases, the :meth:`~.TwoPhaseTransaction.prepare`
+ method on each database's :class:`.TwoPhaseTransaction` will be
+ called. This allows each database to roll back the entire
+ transaction, before each transaction is committed.
+
+ """
+ self.identity_map = identity.WeakInstanceDict()
+
+ self._new = {} # InstanceState->object, strong refs object
+ self._deleted = {} # same
+ self.bind = bind
+ self.__binds = {}
+ self._flushing = False
+ self._warn_on_events = False
+ self._transaction = None
+ self._nested_transaction = None
+ self.future = future
+ self.hash_key = _new_sessionid()
+ self.autoflush = autoflush
+ self.expire_on_commit = expire_on_commit
+ self.enable_baked_queries = enable_baked_queries
+
+ if autocommit:
+ if future:
+ raise sa_exc.ArgumentError(
+ "Cannot use autocommit mode with future=True."
+ )
+ self.autocommit = True
+ else:
+ self.autocommit = False
+
+ self.twophase = twophase
+ self._query_cls = query_cls if query_cls else query.Query
+ if info:
+ self.info.update(info)
+
+ if binds is not None:
+ for key, bind in binds.items():
+ self._add_bind(key, bind)
+
+ _sessions[self.hash_key] = self
+
+ # used by sqlalchemy.engine.util.TransactionalContext
+ _trans_context_manager = None
+
+ connection_callable = None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type_, value, traceback):
+ self.close()
+
+ @util.contextmanager
+ def _maker_context_manager(self):
+ with self:
+ with self.begin():
+ yield self
+
+ @property
+ @util.deprecated_20(
+ ":attr:`_orm.Session.transaction`",
+ alternative="For context manager use, use "
+ ":meth:`_orm.Session.begin`. To access "
+ "the current root transaction, use "
+ ":meth:`_orm.Session.get_transaction`.",
+ warn_on_attribute_access=True,
+ )
+ def transaction(self):
+ """The current active or inactive :class:`.SessionTransaction`.
+
+ May be None if no transaction has begun yet.
+
+ .. versionchanged:: 1.4 the :attr:`.Session.transaction` attribute
+ is now a read-only descriptor that also may return None if no
+ transaction has begun yet.
+
+
+ """
+ return self._legacy_transaction()
+
+ def _legacy_transaction(self):
+ if not self.future:
+ self._autobegin()
+ return self._transaction
+
+ def in_transaction(self):
+ """Return True if this :class:`_orm.Session` has begun a transaction.
+
+ .. versionadded:: 1.4
+
+ .. seealso::
+
+ :attr:`_orm.Session.is_active`
+
+
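+ A short sketch of the interaction with autobegin; ``engine`` and a
+ mapped ``User`` class are assumed::
+
+     from sqlalchemy import select
+
+     session = Session(engine)
+     session.in_transaction()  # False
+     session.execute(select(User))  # autobegin takes effect
+     session.in_transaction()  # True
+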
+ """
+ return self._transaction is not None
+
+ def in_nested_transaction(self):
+ """Return True if this :class:`_orm.Session` has begun a nested
+ transaction, e.g. SAVEPOINT.
+
+ .. versionadded:: 1.4
+
+ """
+ return self._nested_transaction is not None
+
+ def get_transaction(self):
+ """Return the current root transaction in progress, if any.
+
+ .. versionadded:: 1.4
+
+ """
+ trans = self._transaction
+ while trans is not None and trans._parent is not None:
+ trans = trans._parent
+ return trans
+
+ def get_nested_transaction(self):
+ """Return the current nested transaction in progress, if any.
+
+ .. versionadded:: 1.4
+
+ """
+
+ return self._nested_transaction
+
+ @util.memoized_property
+ def info(self):
+ """A user-modifiable dictionary.
+
+ The initial value of this dictionary can be populated using the
+ ``info`` argument to the :class:`.Session` constructor or
+ :class:`.sessionmaker` constructor or factory methods. The dictionary
+ here is always local to this :class:`.Session` and can be modified
+ independently of all other :class:`.Session` objects.
+
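+ E.g., a brief sketch::
+
+     session = Session(info={"request_id": 42})
+     session.info["request_id"]  # 42
+     session.info["extra"] = "data"  # local to this Session only
+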
+ """
+ return {}
+
+ def _autobegin(self):
+ if not self.autocommit and self._transaction is None:
+
+ trans = SessionTransaction(self, autobegin=True)
+ assert self._transaction is trans
+ return True
+
+ return False
+
+ @util.deprecated_params(
+ subtransactions=(
+ "2.0",
+ "The :paramref:`_orm.Session.begin.subtransactions` flag is "
+ "deprecated and "
+ "will be removed in SQLAlchemy version 2.0. See "
+ "the documentation at :ref:`session_subtransactions` for "
+ "background on a compatible alternative pattern.",
+ )
+ )
+ def begin(self, subtransactions=False, nested=False, _subtrans=False):
+ """Begin a transaction, or nested transaction,
+ on this :class:`.Session`, if one is not already begun.
+
+ The :class:`_orm.Session` object features **autobegin** behavior,
+ so that normally it is not necessary to call the
+ :meth:`_orm.Session.begin`
+ method explicitly. However, it may be used in order to control
+ the scope of when the transactional state is begun.
+
+ When used to begin the outermost transaction, an error is raised
+ if this :class:`.Session` is already inside of a transaction.
+
+ :param nested: if True, begins a SAVEPOINT transaction and is
+ equivalent to calling :meth:`~.Session.begin_nested`. For
+ documentation on SAVEPOINT transactions, please see
+ :ref:`session_begin_nested`.
+
+ :param subtransactions: if True, indicates that this
+ :meth:`~.Session.begin` can create a "subtransaction".
+
+ :return: the :class:`.SessionTransaction` object. Note that
+ :class:`.SessionTransaction`
+ acts as a Python context manager, allowing :meth:`.Session.begin`
+ to be used in a "with" block. See :ref:`session_autocommit` for
+ an example.
+
+ .. seealso::
+
+ :ref:`session_autobegin`
+
+ :ref:`unitofwork_transaction`
+
+ :meth:`.Session.begin_nested`
+
+
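+ E.g., a sketch of explicit transaction framing; ``session`` and
+ ``some_object`` are assumed::
+
+     with session.begin():
+         session.add(some_object)
+     # the block commits the transaction on success, or rolls it
+     # back if an exception was raised
+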
+ """
+
+ if subtransactions and self.future:
+ raise NotImplementedError(
+ "subtransactions are not implemented in future "
+ "Session objects."
+ )
+
+ if self._autobegin():
+ if not subtransactions and not nested and not _subtrans:
+ return self._transaction
+
+ if self._transaction is not None:
+ if subtransactions or _subtrans or nested:
+ trans = self._transaction._begin(nested=nested)
+ assert self._transaction is trans
+ if nested:
+ self._nested_transaction = trans
+ else:
+ raise sa_exc.InvalidRequestError(
+ "A transaction is already begun on this Session."
+ )
+ elif not self.autocommit:
+ # outermost transaction. must not be nested and must not be
+ # a subtransaction
+
+ assert not nested and not _subtrans and not subtransactions
+ trans = SessionTransaction(self)
+ assert self._transaction is trans
+ else:
+ # legacy autocommit mode
+ assert not self.future
+ trans = SessionTransaction(self, nested=nested)
+ assert self._transaction is trans
+
+ return self._transaction # needed for __enter__/__exit__ hook
+
+ def begin_nested(self):
+ """Begin a "nested" transaction on this Session, e.g. SAVEPOINT.
+
+ The target database(s) and associated drivers must support SQL
+ SAVEPOINT for this method to function correctly.
+
+ For documentation on SAVEPOINT
+ transactions, please see :ref:`session_begin_nested`.
+
+ :return: the :class:`.SessionTransaction` object. Note that
+ :class:`.SessionTransaction` acts as a context manager, allowing
+ :meth:`.Session.begin_nested` to be used in a "with" block.
+ See :ref:`session_begin_nested` for a usage example.
+
+ .. seealso::
+
+ :ref:`session_begin_nested`
+
+ :ref:`pysqlite_serializable` - special workarounds required
+ with the SQLite driver in order for SAVEPOINT to work
+ correctly.
+
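+ E.g., a sketch in which a failure is contained to the SAVEPOINT;
+ ``session`` and ``child`` are assumed::
+
+     with session.begin_nested():
+         session.add(child)
+         # an error raised here rolls back to the SAVEPOINT only;
+         # the enclosing transaction remains usable
+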
+ """
+ return self.begin(nested=True)
+
+ def rollback(self):
+ """Rollback the current transaction in progress.
+
+ If no transaction is in progress, this method is a pass-through.
+
+ In :term:`1.x-style` use, this method rolls back the topmost
+ database transaction if no nested transactions are in effect, or
+ to the current nested transaction if one is in effect.
+
+ When
+ :term:`2.0-style` use is in effect via the
+ :paramref:`_orm.Session.future` flag, the method always rolls back
+ the topmost database transaction, discarding any nested
+ transactions that may be in progress.
+
+ .. seealso::
+
+ :ref:`session_rollback`
+
+ :ref:`unitofwork_transaction`
+
+ """
+ if self._transaction is not None:
+ self._transaction.rollback(_to_root=self.future)
+
+ def commit(self):
+ """Flush pending changes and commit the current transaction.
+
+ When the COMMIT operation is complete, all objects are fully
+ :term:`expired`, erasing their internal contents, which will be
+ automatically re-loaded when the objects are next accessed. In the
+ interim, these objects are in an expired state and will not function if
+ they are :term:`detached` from the :class:`.Session`. Additionally,
+ this re-load operation is not supported when using asyncio-oriented
+ APIs. The :paramref:`.Session.expire_on_commit` parameter may be used
+ to disable this behavior.
+
+ When there is no transaction in place for the :class:`.Session`,
+ indicating that no operations were invoked on this :class:`.Session`
+ since the previous call to :meth:`.Session.commit`, the method will
+ begin and commit an internal-only "logical" transaction, which does
+ normally affect the database unless pending flush changes were
+ detected, but will still invoke event handlers and object expiration
+ rules.
+
+ If :term:`1.x-style` use is in effect and there are currently
+ SAVEPOINTs in progress via :meth:`_orm.Session.begin_nested`,
+ the operation will release the current SAVEPOINT but not commit
+ the outermost database transaction.
+
+ If :term:`2.0-style` use is in effect via the
+ :paramref:`_orm.Session.future` flag, the outermost database
+ transaction is committed unconditionally, automatically releasing any
+ SAVEPOINTs in effect.
+
+ When using legacy "autocommit" mode, this method is only
+ valid to call if a transaction is actually in progress, else
+ an error is raised. Similarly, when using legacy "subtransactions",
+ the method will instead close out the current "subtransaction",
+ rather than the actual database transaction, if a transaction
+ is in progress.
+
+ .. seealso::
+
+ :ref:`session_committing`
+
+ :ref:`unitofwork_transaction`
+
+ :ref:`asyncio_orm_avoid_lazyloads`
+
+ """
+ if self._transaction is None:
+ if not self._autobegin():
+ raise sa_exc.InvalidRequestError("No transaction is begun.")
+
+ self._transaction.commit(_to_root=self.future)
+
+ def prepare(self):
+ """Prepare the current transaction in progress for two phase commit.
+
+ If no transaction is in progress, this method raises an
+ :exc:`~sqlalchemy.exc.InvalidRequestError`.
+
+ Only root transactions of two phase sessions can be prepared. If the
+ current transaction is not such, an
+ :exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
+
+ """
+ if self._transaction is None:
+ if not self._autobegin():
+ raise sa_exc.InvalidRequestError("No transaction is begun.")
+
+ self._transaction.prepare()
+
+ def connection(
+ self,
+ bind_arguments=None,
+ close_with_result=False,
+ execution_options=None,
+ **kw
+ ):
+ r"""Return a :class:`_engine.Connection` object corresponding to this
+ :class:`.Session` object's transactional state.
+
+ If this :class:`.Session` is configured with ``autocommit=False``,
+ either the :class:`_engine.Connection` corresponding to the current
+ transaction is returned, or if no transaction is in progress, a new
+ one is begun and the :class:`_engine.Connection`
+ returned (note that no
+ transactional state is established with the DBAPI until the first
+ SQL statement is emitted).
+
+ Alternatively, if this :class:`.Session` is configured with
+ ``autocommit=True``, an ad-hoc :class:`_engine.Connection` is returned
+ using :meth:`_engine.Engine.connect` on the underlying
+ :class:`_engine.Engine`.
+
+ Ambiguity in multi-bind or unbound :class:`.Session` objects can be
+ resolved through any of the optional keyword arguments. Resolution
+ is ultimately performed by the :meth:`.get_bind` method.
+
+ :param bind_arguments: dictionary of bind arguments. May include
+ "mapper", "bind", "clause", other custom arguments that are passed
+ to :meth:`.Session.get_bind`.
+
+ :param bind:
+ deprecated; use bind_arguments
+
+ :param mapper:
+ deprecated; use bind_arguments
+
+ :param clause:
+ deprecated; use bind_arguments
+
+ :param close_with_result: Passed to :meth:`_engine.Engine.connect`,
+ indicating the :class:`_engine.Connection` should be considered
+ "single use", automatically closing when the first result set is
+ closed. This flag only has an effect if this :class:`.Session` is
+ configured with ``autocommit=True`` and does not already have a
+ transaction in progress.
+
+ .. deprecated:: 1.4 this parameter is deprecated and will be removed
+ in SQLAlchemy 2.0
+
+ :param execution_options: a dictionary of execution options that will
+ be passed to :meth:`_engine.Connection.execution_options`, **when the
+ connection is first procured only**. If the connection is already
+ present within the :class:`.Session`, a warning is emitted and
+ the arguments are ignored.
+
+ .. seealso::
+
+ :ref:`session_transaction_isolation`
+
+ :param \**kw:
+ deprecated; use bind_arguments
+
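+ E.g., a sketch that procures the transaction's connection with an
+ isolation level established up front::
+
+     conn = session.connection(
+         execution_options={"isolation_level": "SERIALIZABLE"}
+     )
+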
+ """
+
+ if not bind_arguments:
+ bind_arguments = kw
+
+ bind = bind_arguments.pop("bind", None)
+ if bind is None:
+ bind = self.get_bind(**bind_arguments)
+
+ return self._connection_for_bind(
+ bind,
+ close_with_result=close_with_result,
+ execution_options=execution_options,
+ )
+
+ def _connection_for_bind(self, engine, execution_options=None, **kw):
+ TransactionalContext._trans_ctx_check(self)
+
+ if self._transaction is not None or self._autobegin():
+ return self._transaction._connection_for_bind(
+ engine, execution_options
+ )
+
+ assert self._transaction is None
+ assert self.autocommit
+ conn = engine.connect(**kw)
+ if execution_options:
+ conn = conn.execution_options(**execution_options)
+ return conn
+
+ def execute(
+ self,
+ statement,
+ params=None,
+ execution_options=util.EMPTY_DICT,
+ bind_arguments=None,
+ _parent_execute_state=None,
+ _add_event=None,
+ **kw
+ ):
+ r"""Execute a SQL expression construct.
+
+ Returns a :class:`_engine.Result` object representing
+ results of the statement execution.
+
+ E.g.::
+
+ from sqlalchemy import select
+ result = session.execute(
+ select(User).where(User.id == 5)
+ )
+
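+ Passing a list of parameter dictionaries invokes "executemany"
+ style execution, as in this sketch (assumes a mapped ``User``
+ class)::
+
+     from sqlalchemy import insert
+
+     session.execute(
+         insert(User),
+         [{"name": "sandy"}, {"name": "spongebob"}],
+     )
+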
+ The API contract of :meth:`_orm.Session.execute` is similar to that
+ of :meth:`_future.Connection.execute`, the :term:`2.0 style` version
+ of :class:`_future.Connection`.
+
+ .. versionchanged:: 1.4 the :meth:`_orm.Session.execute` method is
+ now the primary point of ORM statement execution when using
+ :term:`2.0 style` ORM usage.
+
+ :param statement:
+ An executable statement (i.e. an :class:`.Executable` expression
+ such as :func:`_expression.select`).
+
+ :param params:
+ Optional dictionary, or list of dictionaries, containing
+ bound parameter values. If a single dictionary, single-row
+ execution occurs; if a list of dictionaries, an
+ "executemany" will be invoked. The keys in each dictionary
+ must correspond to parameter names present in the statement.
+
+ :param execution_options: optional dictionary of execution options,
+ which will be associated with the statement execution. This
+ dictionary can provide a subset of the options that are accepted
+ by :meth:`_engine.Connection.execution_options`, and may also
+ provide additional options understood only in an ORM context.
+
+ :param bind_arguments: dictionary of additional arguments to determine
+ the bind. May include "mapper", "bind", or other custom arguments.
+ Contents of this dictionary are passed to the
+ :meth:`.Session.get_bind` method.
+
+ :param mapper:
+ deprecated; use the bind_arguments dictionary
+
+ :param bind:
+ deprecated; use the bind_arguments dictionary
+
+ :param \**kw:
+ deprecated; use the bind_arguments dictionary
+
+ :return: a :class:`_engine.Result` object.
+
+
+ """
+ statement = coercions.expect(roles.StatementRole, statement)
+
+ if kw:
+ util.warn_deprecated_20(
+ "Passing bind arguments to Session.execute() as keyword "
+ "arguments is deprecated and will be removed SQLAlchemy 2.0. "
+ "Please use the bind_arguments parameter."
+ )
+ if not bind_arguments:
+ bind_arguments = kw
+ else:
+ bind_arguments.update(kw)
+ elif not bind_arguments:
+ bind_arguments = {}
+
+ if (
+ statement._propagate_attrs.get("compile_state_plugin", None)
+ == "orm"
+ ):
+ # note that even without "future" mode, we need the ORM
+ # compile state plugin so that ORM-level execution steps take place
+ compile_state_cls = CompileState._get_plugin_class_for_plugin(
+ statement, "orm"
+ )
+ else:
+ compile_state_cls = None
+
+ execution_options = util.coerce_to_immutabledict(execution_options)
+
+ if compile_state_cls is not None:
+ (
+ statement,
+ execution_options,
+ ) = compile_state_cls.orm_pre_session_exec(
+ self,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ _parent_execute_state is not None,
+ )
+ else:
+ bind_arguments.setdefault("clause", statement)
+ execution_options = execution_options.union(
+ {"future_result": True}
+ )
+
+ if _parent_execute_state:
+ events_todo = _parent_execute_state._remaining_events()
+ else:
+ events_todo = self.dispatch.do_orm_execute
+ if _add_event:
+ events_todo = list(events_todo) + [_add_event]
+
+ if events_todo:
+ orm_exec_state = ORMExecuteState(
+ self,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ compile_state_cls,
+ events_todo,
+ )
+ for idx, fn in enumerate(events_todo):
+ orm_exec_state._starting_event_idx = idx
+ result = fn(orm_exec_state)
+ if result:
+ return result
+
+ statement = orm_exec_state.statement
+ execution_options = orm_exec_state.local_execution_options
+
+ bind = self.get_bind(**bind_arguments)
+
+ if self.autocommit:
+ # legacy stuff, we can't use future_result w/ autocommit because
+ # we rely upon close_with_result, also legacy. it's all
+ # interrelated
+ conn = self._connection_for_bind(bind, close_with_result=True)
+ execution_options = execution_options.union(
+ dict(future_result=False)
+ )
+ else:
+ conn = self._connection_for_bind(bind)
+ result = conn._execute_20(statement, params or {}, execution_options)
+
+ if compile_state_cls:
+ result = compile_state_cls.orm_setup_cursor_result(
+ self,
+ statement,
+ params,
+ execution_options,
+ bind_arguments,
+ result,
+ )
+
+ return result
+
+ def scalar(
+ self,
+ statement,
+ params=None,
+ execution_options=util.EMPTY_DICT,
+ bind_arguments=None,
+ **kw
+ ):
+ """Execute a statement and return a scalar result.
+
+ Usage and parameters are the same as that of
+ :meth:`_orm.Session.execute`; the return result is a scalar Python
+ value.
+
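+ E.g., a brief sketch, assuming a mapped ``User`` class::
+
+     from sqlalchemy import func, select
+
+     user_count = session.scalar(
+         select(func.count()).select_from(User)
+     )
+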
+ """
+
+ return self.execute(
+ statement,
+ params=params,
+ execution_options=execution_options,
+ bind_arguments=bind_arguments,
+ **kw
+ ).scalar()
+
+ def scalars(
+ self,
+ statement,
+ params=None,
+ execution_options=util.EMPTY_DICT,
+ bind_arguments=None,
+ **kw
+ ):
+ """Execute a statement and return the results as scalars.
+
+ Usage and parameters are the same as that of
+ :meth:`_orm.Session.execute`; the return result is a
+ :class:`_result.ScalarResult` filtering object which
+ will return single elements rather than :class:`_row.Row` objects.
+
+ :return: a :class:`_result.ScalarResult` object
+
+ .. versionadded:: 1.4.24
+
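+ E.g., a brief sketch, assuming a mapped ``User`` class::
+
+     users = session.scalars(select(User).order_by(User.id)).all()
+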
+ """
+
+ return self.execute(
+ statement,
+ params=params,
+ execution_options=execution_options,
+ bind_arguments=bind_arguments,
+ **kw
+ ).scalars()
+
+ def close(self):
+ """Close out the transactional resources and ORM objects used by this
+ :class:`_orm.Session`.
+
+ This expunges all ORM objects associated with this
+ :class:`_orm.Session`, ends any transaction in progress and
+ :term:`releases` any :class:`_engine.Connection` objects which this
+ :class:`_orm.Session` itself has checked out from associated
+ :class:`_engine.Engine` objects. The operation then leaves the
+ :class:`_orm.Session` in a state in which it may be used again.
+
+ .. tip::
+
+ The :meth:`_orm.Session.close` method **does not prevent the
+ Session from being used again**. The :class:`_orm.Session` itself
+ does not actually have a distinct "closed" state; it merely means
+ the :class:`_orm.Session` will release all database connections
+ and ORM objects.
+
+ .. versionchanged:: 1.4 The :meth:`.Session.close` method does not
+ immediately create a new :class:`.SessionTransaction` object;
+ instead, the new :class:`.SessionTransaction` is created only if
+ the :class:`.Session` is used again for a database operation.
+
+ .. seealso::
+
+ :ref:`session_closing` - detail on the semantics of
+ :meth:`_orm.Session.close`
+
+ """
+ self._close_impl(invalidate=False)
+
+ def invalidate(self):
+ """Close this Session, using connection invalidation.
+
+ This is a variant of :meth:`.Session.close` that will additionally
+ ensure that the :meth:`_engine.Connection.invalidate`
+ method will be called on each :class:`_engine.Connection` object
+ that is currently in use for a transaction (typically there is only
+ one connection unless the :class:`_orm.Session` is used with
+ multiple engines).
+
+ This can be called when the database is known to be in a state where
+ the connections are no longer safe to be used.
+
+ The example below illustrates a scenario using `gevent
+ <https://www.gevent.org/>`_, which can produce ``Timeout`` exceptions
+ that may mean the underlying connection should be discarded::
+
+ import gevent
+
+ try:
+ sess = Session()
+ sess.add(User())
+ sess.commit()
+ except gevent.Timeout:
+ sess.invalidate()
+ raise
+ except:
+ sess.rollback()
+ raise
+
+ The method additionally does everything that :meth:`_orm.Session.close`
+ does, including that all ORM objects are expunged.
+
+ """
+ self._close_impl(invalidate=True)
+
+ def _close_impl(self, invalidate):
+ self.expunge_all()
+ if self._transaction is not None:
+ for transaction in self._transaction._iterate_self_and_parents():
+ transaction.close(invalidate)
+
+ def expunge_all(self):
+ """Remove all object instances from this ``Session``.
+
+ This is equivalent to calling ``expunge(obj)`` on all objects in this
+ ``Session``.
+
+ """
+
+ all_states = self.identity_map.all_states() + list(self._new)
+ self.identity_map._kill()
+ self.identity_map = identity.WeakInstanceDict()
+ self._new = {}
+ self._deleted = {}
+
+ statelib.InstanceState._detach_states(all_states, self)
+
+ def _add_bind(self, key, bind):
+ try:
+ insp = inspect(key)
+ except sa_exc.NoInspectionAvailable as err:
+ if not isinstance(key, type):
+ util.raise_(
+ sa_exc.ArgumentError(
+ "Not an acceptable bind target: %s" % key
+ ),
+ replace_context=err,
+ )
+ else:
+ self.__binds[key] = bind
+ else:
+ if insp.is_selectable:
+ self.__binds[insp] = bind
+ elif insp.is_mapper:
+ self.__binds[insp.class_] = bind
+ for _selectable in insp._all_tables:
+ self.__binds[_selectable] = bind
+ else:
+ raise sa_exc.ArgumentError(
+ "Not an acceptable bind target: %s" % key
+ )
+
+ def bind_mapper(self, mapper, bind):
+ """Associate a :class:`_orm.Mapper` or arbitrary Python class with a
+ "bind", e.g. an :class:`_engine.Engine` or
+ :class:`_engine.Connection`.
+
+ The given entity is added to a lookup used by the
+ :meth:`.Session.get_bind` method.
+
+ :param mapper: a :class:`_orm.Mapper` object,
+ or an instance of a mapped
+ class, or any Python class that is the base of a set of mapped
+ classes.
+
+ :param bind: an :class:`_engine.Engine` or :class:`_engine.Connection`
+ object.
+
+ .. seealso::
+
+ :ref:`session_partitioning`
+
+ :paramref:`.Session.binds`
+
+ :meth:`.Session.bind_table`
+
+
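+ E.g., a sketch that routes two hypothetical mapped classes to
+ separate engines::
+
+     session = Session()
+     session.bind_mapper(User, user_engine)
+     session.bind_mapper(Account, account_engine)
+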
+ """
+ self._add_bind(mapper, bind)
+
+ def bind_table(self, table, bind):
+ """Associate a :class:`_schema.Table` with a "bind", e.g. an
+ :class:`_engine.Engine`
+ or :class:`_engine.Connection`.
+
+ The given :class:`_schema.Table` is added to a lookup used by the
+ :meth:`.Session.get_bind` method.
+
+ :param table: a :class:`_schema.Table` object,
+ which is typically the target
+ of an ORM mapping, or is present within a selectable that is
+ mapped.
+
+ :param bind: an :class:`_engine.Engine` or :class:`_engine.Connection`
+ object.
+
+ .. seealso::
+
+ :ref:`session_partitioning`
+
+ :paramref:`.Session.binds`
+
+ :meth:`.Session.bind_mapper`
+
+
+ """
+ self._add_bind(table, bind)
+
+ def get_bind(
+ self,
+ mapper=None,
+ clause=None,
+ bind=None,
+ _sa_skip_events=None,
+ _sa_skip_for_implicit_returning=False,
+ ):
+ """Return a "bind" to which this :class:`.Session` is bound.
+
+ The "bind" is usually an instance of :class:`_engine.Engine`,
+ except in the case where the :class:`.Session` has been
+ explicitly bound directly to a :class:`_engine.Connection`.
+
+ For a multiply-bound or unbound :class:`.Session`, the
+ ``mapper`` or ``clause`` arguments are used to determine the
+ appropriate bind to return.
+
+ Note that the "mapper" argument is usually present
+ when :meth:`.Session.get_bind` is called via an ORM
+ operation such as a :meth:`.Session.query`, each
+ individual INSERT/UPDATE/DELETE operation within a
+ :meth:`.Session.flush` call, etc.
+
+ The order of resolution is:
+
+ 1. if mapper given and :paramref:`.Session.binds` is present,
+ locate a bind based first on the mapper in use, then
+ on the mapped class in use, then on any base classes that are
+ present in the ``__mro__`` of the mapped class, from more specific
+ superclasses to more general.
+ 2. if clause given and ``Session.binds`` is present,
+ locate a bind based on :class:`_schema.Table` objects
+ found in the given clause present in ``Session.binds``.
+ 3. if ``Session.binds`` is present, return that.
+ 4. if clause given, attempt to return a bind
+ linked to the :class:`_schema.MetaData` ultimately
+ associated with the clause.
+ 5. if mapper given, attempt to return a bind
+ linked to the :class:`_schema.MetaData` ultimately
+ associated with the :class:`_schema.Table` or other
+ selectable to which the mapper is mapped.
+ 6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError`
+ is raised.
+
+ Note that the :meth:`.Session.get_bind` method can be overridden on
+ a user-defined subclass of :class:`.Session` to provide any kind
+ of bind resolution scheme. See the example at
+ :ref:`session_custom_partitioning`.
+
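+ A minimal sketch of such a subclass, assuming hypothetical
+ ``leader`` and ``follower`` engines::
+
+     class RoutingSession(Session):
+         def get_bind(self, mapper=None, clause=None, **kw):
+             if self._flushing:
+                 return leader
+             return follower
+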
+ :param mapper:
+ Optional :func:`.mapper` mapped class or instance of
+ :class:`_orm.Mapper`. The bind can be derived from a
+ :class:`_orm.Mapper`
+ first by consulting the "binds" map associated with this
+ :class:`.Session`, and secondly by consulting the
+ :class:`_schema.MetaData`
+ associated with the :class:`_schema.Table` to which the
+ :class:`_orm.Mapper`
+ is mapped for a bind.
+
+ :param clause:
+ A :class:`_expression.ClauseElement` (i.e.
+ :func:`_expression.select`,
+ :func:`_expression.text`,
+ etc.). If the ``mapper`` argument is not present or could not
+ produce a bind, the given expression construct will be searched
+ for a bound element, typically a :class:`_schema.Table`
+ associated with
+ bound :class:`_schema.MetaData`.
+
+ .. seealso::
+
+ :ref:`session_partitioning`
+
+ :paramref:`.Session.binds`
+
+ :meth:`.Session.bind_mapper`
+
+ :meth:`.Session.bind_table`
+
+ """
+
+ # this function is documented as a subclassing hook, so we have
+ # to call this method even if the return is simple
+ if bind:
+ return bind
+ elif not self.__binds and self.bind:
+ # simplest and most common case, we have a bind and no
+ # per-mapper/table binds, we're done
+ return self.bind
+
+ # no simple case here: either self.__binds is populated, or
+ # there is no self.bind at all (the legacy unbound case). Look
+ # at the mapper and the clause
+ if mapper is clause is None:
+ if self.bind:
+ return self.bind
+ else:
+ raise sa_exc.UnboundExecutionError(
+ "This session is not bound to a single Engine or "
+ "Connection, and no context was provided to locate "
+ "a binding."
+ )
+
+ # look more closely at the mapper.
+ if mapper is not None:
+ try:
+ mapper = inspect(mapper)
+ except sa_exc.NoInspectionAvailable as err:
+ if isinstance(mapper, type):
+ util.raise_(
+ exc.UnmappedClassError(mapper),
+ replace_context=err,
+ )
+ else:
+ raise
+
+ # match up the mapper or clause in the __binds
+ if self.__binds:
+ # matching mappers and selectables to entries in the
+ # binds dictionary; supported use case.
+ if mapper:
+ for cls in mapper.class_.__mro__:
+ if cls in self.__binds:
+ return self.__binds[cls]
+ if clause is None:
+ clause = mapper.persist_selectable
+
+ if clause is not None:
+ plugin_subject = clause._propagate_attrs.get(
+ "plugin_subject", None
+ )
+
+ if plugin_subject is not None:
+ for cls in plugin_subject.mapper.class_.__mro__:
+ if cls in self.__binds:
+ return self.__binds[cls]
+
+ for obj in visitors.iterate(clause):
+ if obj in self.__binds:
+ return self.__binds[obj]
+
+ # none of the __binds matched, but we have a fallback bind.
+ # return that
+ if self.bind:
+ return self.bind
+
+ # now we are in legacy territory. looking for "bind" on tables
+ # that are associated via bound metadata. this goes away in 2.0.
+
+ future_msg = ""
+ future_code = ""
+
+ if mapper and clause is None:
+ clause = mapper.persist_selectable
+
+ if clause is not None:
+ if clause.bind:
+ if self.future:
+ future_msg = (
+ " A bind was located via legacy bound metadata, but "
+ "since future=True is set on this Session, this "
+ "bind is ignored."
+ )
+ else:
+ util.warn_deprecated_20(
+ "This Session located a target engine via bound "
+ "metadata; as this functionality will be removed in "
+ "SQLAlchemy 2.0, an Engine object should be passed "
+ "to the Session() constructor directly."
+ )
+ return clause.bind
+
+ if mapper:
+ if mapper.persist_selectable.bind:
+ if self.future:
+ future_msg = (
+ " A bind was located via legacy bound metadata, but "
+ "since future=True is set on this Session, this "
+ "bind is ignored."
+ )
+ else:
+ util.warn_deprecated_20(
+ "This Session located a target engine via bound "
+ "metadata; as this functionality will be removed in "
+ "SQLAlchemy 2.0, an Engine object should be passed "
+ "to the Session() constructor directly."
+ )
+ return mapper.persist_selectable.bind
+
+ context = []
+ if mapper is not None:
+ context.append("mapper %s" % mapper)
+ if clause is not None:
+ context.append("SQL expression")
+
+ raise sa_exc.UnboundExecutionError(
+ "Could not locate a bind configured on %s or this Session.%s"
+ % (", ".join(context), future_msg),
+ code=future_code,
+ )
+
+ def query(self, *entities, **kwargs):
+ """Return a new :class:`_query.Query` object corresponding to this
+ :class:`_orm.Session`.
+
+ """
+
+ return self._query_cls(entities, self, **kwargs)
+
+ def _identity_lookup(
+ self,
+ mapper,
+ primary_key_identity,
+ identity_token=None,
+ passive=attributes.PASSIVE_OFF,
+ lazy_loaded_from=None,
+ ):
+ """Locate an object in the identity map.
+
+ Given a primary key identity, constructs an identity key and then
+ looks in the session's identity map. If present, the object may
+ be run through unexpiration rules (e.g. load unloaded attributes,
+ check if was deleted).
+
+ e.g.::
+
+ obj = session._identity_lookup(inspect(SomeClass), (1, ))
+
+ :param mapper: mapper in use
+ :param primary_key_identity: the primary key we are searching for, as
+ a tuple.
+ :param identity_token: identity token that should be used to create
+ the identity key. Used as-is; however, overriding subclasses can
+ repurpose this in order to interpret the value in a special way,
+ such as searching among multiple target tokens when given None.
+ :param passive: passive load flag passed to
+ :func:`.loading.get_from_identity`, which impacts the behavior if
+ the object is found; the object may be validated and/or unexpired
+ if the flag allows for SQL to be emitted.
+ :param lazy_loaded_from: an :class:`.InstanceState` that is
+ specifically asking for this identity as a related identity. Used
+ for sharding schemes where there is a correspondence between an object
+ and a related object being lazy-loaded (or otherwise
+ relationship-loaded).
+
+ :return: None if the object is not found in the identity map, *or*
+ if the object was unexpired and found to have been deleted.
+ If passive flags disallow SQL and the object is expired, returns
+ PASSIVE_NO_RESULT. In all other cases the instance is returned.
+
+ .. versionchanged:: 1.4.0 - the :meth:`.Session._identity_lookup`
+ method was moved from :class:`_query.Query` to
+ :class:`.Session`, to avoid having to instantiate the
+ :class:`_query.Query` object.
+
+
+ """
+
+ key = mapper.identity_key_from_primary_key(
+ primary_key_identity, identity_token=identity_token
+ )
+ return loading.get_from_identity(self, mapper, key, passive)
+
+ @property
+ @util.contextmanager
+ def no_autoflush(self):
+ """Return a context manager that disables autoflush.
+
+ e.g.::
+
+ with session.no_autoflush:
+
+ some_object = SomeClass()
+ session.add(some_object)
+ # won't autoflush
+ some_object.related_thing = session.query(SomeRelated).first()
+
+ Operations that proceed within the ``with:`` block
+ will not be subject to flushes occurring upon query
+ access. This is useful when initializing a series
+ of objects which involve existing database queries,
+ where the uncompleted object should not yet be flushed.
+
+ """
+ autoflush = self.autoflush
+ self.autoflush = False
+ try:
+ yield self
+ finally:
+ self.autoflush = autoflush
+
+ def _autoflush(self):
+ if self.autoflush and not self._flushing:
+ try:
+ self.flush()
+ except sa_exc.StatementError as e:
+ # note we are reraising StatementError as opposed to
+ # raising FlushError with "chaining" to remain compatible
+ # with code that catches StatementError, IntegrityError,
+ # etc.
+ e.add_detail(
+ "raised as a result of Query-invoked autoflush; "
+ "consider using a session.no_autoflush block if this "
+ "flush is occurring prematurely"
+ )
+ util.raise_(e, with_traceback=sys.exc_info()[2])
+
+ def refresh(self, instance, attribute_names=None, with_for_update=None):
+ """Expire and refresh attributes on the given instance.
+
+ The selected attributes will first be expired as they would when using
+ :meth:`_orm.Session.expire`; then a SELECT statement will be issued to
+ the database to refresh column-oriented attributes with the current
+ value available in the current transaction.
+
+ :func:`_orm.relationship` oriented attributes will also be immediately
+ loaded if they were already eagerly loaded on the object, using the
+ same eager loading strategy that they were loaded with originally.
+ Unloaded relationship attributes will remain unloaded, as will
+ relationship attributes that were originally lazy loaded.
+
+ .. versionadded:: 1.4 - the :meth:`_orm.Session.refresh` method
+ can also refresh eagerly loaded attributes.
+
+ .. tip::
+
+ While the :meth:`_orm.Session.refresh` method is capable of
+ refreshing both column and relationship oriented attributes, its
+ primary focus is on refreshing of local column-oriented attributes
+ on a single instance. For more open ended "refresh" functionality,
+ including the ability to refresh the attributes on many objects at
+ once while having explicit control over relationship loader
+ strategies, use the
+ :ref:`populate existing <orm_queryguide_populate_existing>` feature
+ instead.
+
+ Note that a highly isolated transaction will return the same values as
+ were previously read in that same transaction, regardless of changes
+ in database state outside of that transaction. Refreshing
+ attributes usually only makes sense at the start of a transaction
+ where database rows have not yet been accessed.
+
+ :param attribute_names: optional. An iterable collection of
+ string attribute names indicating a subset of attributes to
+ be refreshed.
+
+ :param with_for_update: optional boolean ``True`` indicating FOR UPDATE
+ should be used, or may be a dictionary containing flags to
+ indicate a more specific set of FOR UPDATE flags for the SELECT;
+ flags should match the parameters of
+ :meth:`_query.Query.with_for_update`.
+ Supersedes the :paramref:`.Session.refresh.lockmode` parameter.
+
+ .. seealso::
+
+ :ref:`session_expire` - introductory material
+
+ :meth:`.Session.expire`
+
+ :meth:`.Session.expire_all`
+
+ :ref:`orm_queryguide_populate_existing` - allows any ORM query
+ to refresh objects as they would be loaded normally.
+
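+ E.g., a sketch that reloads two hypothetical attributes of
+ ``some_object`` while locking its row::
+
+     session.refresh(
+         some_object,
+         attribute_names=["name", "status"],
+         with_for_update=True,
+     )
+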
+ """
+ try:
+ state = attributes.instance_state(instance)
+ except exc.NO_STATE as err:
+ util.raise_(
+ exc.UnmappedInstanceError(instance),
+ replace_context=err,
+ )
+
+ self._expire_state(state, attribute_names)
+
+ if with_for_update == {}:
+ raise sa_exc.ArgumentError(
+ "with_for_update should be the boolean value "
+ "True, or a dictionary with options. "
+ "A blank dictionary is ambiguous."
+ )
+
+ with_for_update = query.ForUpdateArg._from_argument(with_for_update)
+
+ stmt = sql.select(object_mapper(instance))
+ if (
+ loading.load_on_ident(
+ self,
+ stmt,
+ state.key,
+ refresh_state=state,
+ with_for_update=with_for_update,
+ only_load_props=attribute_names,
+ )
+ is None
+ ):
+ raise sa_exc.InvalidRequestError(
+ "Could not refresh instance '%s'" % instance_str(instance)
+ )
+
+ def expire_all(self):
+ """Expires all persistent instances within this Session.
+
+ When any attribute on a persistent instance is next accessed,
+ a query will be issued using the
+ :class:`.Session` object's current transactional context in order to
+ load all expired attributes for the given instance. Note that
+ a highly isolated transaction will return the same values as were
+ previously read in that same transaction, regardless of changes
+ in database state outside of that transaction.
+
+ To expire individual objects and individual attributes
+ on those objects, use :meth:`Session.expire`.
+
+ The :class:`.Session` object's default behavior is to
+ expire all state whenever the :meth:`Session.rollback`
+ or :meth:`Session.commit` methods are called, so that new
+ state can be loaded for the new transaction. For this reason,
+ calling :meth:`Session.expire_all` should not be needed when
+ autocommit is ``False``, assuming the transaction is isolated.
+
+ .. seealso::
+
+ :ref:`session_expire` - introductory material
+
+ :meth:`.Session.expire`
+
+ :meth:`.Session.refresh`
+
+ :meth:`_orm.Query.populate_existing`
+
+ """
+ for state in self.identity_map.all_states():
+ state._expire(state.dict, self.identity_map._modified)
+
+ def expire(self, instance, attribute_names=None):
+ """Expire the attributes on an instance.
+
+ Marks the attributes of an instance as out of date. When an expired
+ attribute is next accessed, a query will be issued to the
+ :class:`.Session` object's current transactional context in order to
+ load all expired attributes for the given instance. Note that
+ a highly isolated transaction will return the same values as were
+ previously read in that same transaction, regardless of changes
+ in database state outside of that transaction.
+
+ To expire all objects in the :class:`.Session` simultaneously,
+ use :meth:`Session.expire_all`.
+
+ The :class:`.Session` object's default behavior is to
+ expire all state whenever the :meth:`Session.rollback`
+ or :meth:`Session.commit` methods are called, so that new
+ state can be loaded for the new transaction. For this reason,
+ calling :meth:`Session.expire` only makes sense for the specific
+ case that a non-ORM SQL statement was emitted in the current
+ transaction.
+
+ :param instance: The instance to be expired.
+ :param attribute_names: optional list of string attribute names
+ indicating a subset of attributes to be expired.
+
+ .. seealso::
+
+ :ref:`session_expire` - introductory material
+
+ :meth:`.Session.expire`
+
+ :meth:`.Session.refresh`
+
+ :meth:`_orm.Query.populate_existing`
+
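+ E.g., a sketch that expires an attribute after a non-ORM UPDATE
+ was emitted; ``some_user`` and its ``name`` attribute are assumed::
+
+     from sqlalchemy import text
+
+     session.execute(text("UPDATE users SET name='ed' WHERE id=5"))
+     session.expire(some_user, ["name"])
+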
+ """
+ try:
+ state = attributes.instance_state(instance)
+ except exc.NO_STATE as err:
+ util.raise_(
+ exc.UnmappedInstanceError(instance),
+ replace_context=err,
+ )
+ self._expire_state(state, attribute_names)
+
+ def _expire_state(self, state, attribute_names):
+ self._validate_persistent(state)
+ if attribute_names:
+ state._expire_attributes(state.dict, attribute_names)
+ else:
+ # pre-fetch the full cascade since the expire is going to
+ # remove associations
+ cascaded = list(
+ state.manager.mapper.cascade_iterator("refresh-expire", state)
+ )
+ self._conditional_expire(state)
+ for o, m, st_, dct_ in cascaded:
+ self._conditional_expire(st_)
+
+ def _conditional_expire(self, state, autoflush=None):
+ """Expire a state if persistent, else expunge if pending"""
+
+ if state.key:
+ state._expire(state.dict, self.identity_map._modified)
+ elif state in self._new:
+ self._new.pop(state)
+ state._detach(self)
+
+ def expunge(self, instance):
+ """Remove the `instance` from this ``Session``.
+
+ This will free all internal references to the instance. Cascading
+ will be applied according to the *expunge* cascade rule.
+
+ """
+ try:
+ state = attributes.instance_state(instance)
+ except exc.NO_STATE as err:
+ util.raise_(
+ exc.UnmappedInstanceError(instance),
+ replace_context=err,
+ )
+ if state.session_id is not self.hash_key:
+ raise sa_exc.InvalidRequestError(
+ "Instance %s is not present in this Session" % state_str(state)
+ )
+
+ cascaded = list(
+ state.manager.mapper.cascade_iterator("expunge", state)
+ )
+ self._expunge_states([state] + [st_ for o, m, st_, dct_ in cascaded])
+
+ def _expunge_states(self, states, to_transient=False):
+ for state in states:
+ if state in self._new:
+ self._new.pop(state)
+ elif self.identity_map.contains_state(state):
+ self.identity_map.safe_discard(state)
+ self._deleted.pop(state, None)
+ elif self._transaction:
+ # state is "detached" from being deleted, but still present
+ # in the transaction snapshot
+ self._transaction._deleted.pop(state, None)
+ statelib.InstanceState._detach_states(
+ states, self, to_transient=to_transient
+ )
+
+ def _register_persistent(self, states):
+ """Register all persistent objects from a flush.
+
+ This is used both for pending objects moving to the persistent
+ state as well as already persistent objects.
+
+ """
+
+ pending_to_persistent = self.dispatch.pending_to_persistent or None
+ for state in states:
+ mapper = _state_mapper(state)
+
+ # guard against last-minute dereferences of the object
+ obj = state.obj()
+ if obj is not None:
+
+ instance_key = mapper._identity_key_from_state(state)
+
+ if (
+ _none_set.intersection(instance_key[1])
+ and not mapper.allow_partial_pks
+ or _none_set.issuperset(instance_key[1])
+ ):
+ raise exc.FlushError(
+ "Instance %s has a NULL identity key. If this is an "
+ "auto-generated value, check that the database table "
+ "allows generation of new primary key values, and "
+ "that the mapped Column object is configured to "
+ "expect these generated values. Ensure also that "
+ "this flush() is not occurring at an inappropriate "
+ "time, such as within a load() event."
+ % state_str(state)
+ )
+
+ if state.key is None:
+ state.key = instance_key
+ elif state.key != instance_key:
+ # primary key switch. use safe_discard() in case another
+ # state has already replaced this one in the identity
+ # map (see test/orm/test_naturalpks.py ReversePKsTest)
+ self.identity_map.safe_discard(state)
+ if state in self._transaction._key_switches:
+ orig_key = self._transaction._key_switches[state][0]
+ else:
+ orig_key = state.key
+ self._transaction._key_switches[state] = (
+ orig_key,
+ instance_key,
+ )
+ state.key = instance_key
+
+ # there can be an existing state in the identity map
+ # that is replaced when the primary keys of two instances
+ # are swapped; see test/orm/test_naturalpks.py -> test_reverse
+ old = self.identity_map.replace(state)
+ if (
+ old is not None
+ and mapper._identity_key_from_state(old) == instance_key
+ and old.obj() is not None
+ ):
+ util.warn(
+ "Identity map already had an identity for %s, "
+ "replacing it with newly flushed object. Are there "
+ "load operations occurring inside of an event handler "
+ "within the flush?" % (instance_key,)
+ )
+ state._orphaned_outside_of_session = False
+
+ statelib.InstanceState._commit_all_states(
+ ((state, state.dict) for state in states), self.identity_map
+ )
+
+ self._register_altered(states)
+
+ if pending_to_persistent is not None:
+ for state in states.intersection(self._new):
+ pending_to_persistent(self, state)
+
+ # remove from new last, might be the last strong ref
+ for state in set(states).intersection(self._new):
+ self._new.pop(state)
+
+ def _register_altered(self, states):
+ if self._transaction:
+ for state in states:
+ if state in self._new:
+ self._transaction._new[state] = True
+ else:
+ self._transaction._dirty[state] = True
+
+ def _remove_newly_deleted(self, states):
+ persistent_to_deleted = self.dispatch.persistent_to_deleted or None
+ for state in states:
+ if self._transaction:
+ self._transaction._deleted[state] = True
+
+ if persistent_to_deleted is not None:
+ # get a strong reference before we pop out of
+ # self._deleted
+ obj = state.obj() # noqa
+
+ self.identity_map.safe_discard(state)
+ self._deleted.pop(state, None)
+ state._deleted = True
+ # can't call state._detach() here, because this state
+ # is still in the transaction snapshot and needs to be
+ # tracked as part of that
+ if persistent_to_deleted is not None:
+ persistent_to_deleted(self, state)
+
+ def add(self, instance, _warn=True):
+ """Place an object in the ``Session``.
+
+ Its state will be persisted to the database on the next flush
+ operation.
+
+ Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
+ is ``expunge()``.
+
+ """
+ if _warn and self._warn_on_events:
+ self._flush_warning("Session.add()")
+
+ try:
+ state = attributes.instance_state(instance)
+ except exc.NO_STATE as err:
+ util.raise_(
+ exc.UnmappedInstanceError(instance),
+ replace_context=err,
+ )
+
+ self._save_or_update_state(state)
+
+ def add_all(self, instances):
+ """Add the given collection of instances to this ``Session``."""
+
+ if self._warn_on_events:
+ self._flush_warning("Session.add_all()")
+
+ for instance in instances:
+ self.add(instance, _warn=False)
+
+ def _save_or_update_state(self, state):
+ state._orphaned_outside_of_session = False
+ self._save_or_update_impl(state)
+
+ mapper = _state_mapper(state)
+ for o, m, st_, dct_ in mapper.cascade_iterator(
+ "save-update", state, halt_on=self._contains_state
+ ):
+ self._save_or_update_impl(st_)
+
+ def delete(self, instance):
+ """Mark an instance as deleted.
+
+ The database delete operation occurs upon ``flush()``.
+
+ """
+ if self._warn_on_events:
+ self._flush_warning("Session.delete()")
+
+ try:
+ state = attributes.instance_state(instance)
+ except exc.NO_STATE as err:
+ util.raise_(
+ exc.UnmappedInstanceError(instance),
+ replace_context=err,
+ )
+
+ self._delete_impl(state, instance, head=True)
+
+ def _delete_impl(self, state, obj, head):
+
+ if state.key is None:
+ if head:
+ raise sa_exc.InvalidRequestError(
+ "Instance '%s' is not persisted" % state_str(state)
+ )
+ else:
+ return
+
+ to_attach = self._before_attach(state, obj)
+
+ if state in self._deleted:
+ return
+
+ self.identity_map.add(state)
+
+ if to_attach:
+ self._after_attach(state, obj)
+
+ if head:
+ # grab the cascades before adding the item to the deleted list
+ # so that autoflush does not delete the item
+ # the strong reference to the instance itself is significant here
+ cascade_states = list(
+ state.manager.mapper.cascade_iterator("delete", state)
+ )
+
+ self._deleted[state] = obj
+
+ if head:
+ for o, m, st_, dct_ in cascade_states:
+ self._delete_impl(st_, o, False)
+
+ def get(
+ self,
+ entity,
+ ident,
+ options=None,
+ populate_existing=False,
+ with_for_update=None,
+ identity_token=None,
+ execution_options=None,
+ ):
+ """Return an instance based on the given primary key identifier,
+ or ``None`` if not found.
+
+ E.g.::
+
+ my_user = session.get(User, 5)
+
+ some_object = session.get(VersionedFoo, (5, 10))
+
+ some_object = session.get(
+ VersionedFoo,
+ {"id": 5, "version_id": 10}
+ )
+
+ .. versionadded:: 1.4 Added :meth:`_orm.Session.get`, which is moved
+ from the now deprecated :meth:`_orm.Query.get` method.
+
+ :meth:`_orm.Session.get` is special in that it provides direct
+ access to the identity map of the :class:`.Session`.
+ If the given primary key identifier is present
+ in the local identity map, the object is returned
+ directly from this collection and no SQL is emitted,
+ unless the object has been marked fully expired.
+ If not present,
+ a SELECT is performed in order to locate the object.
+
+ :meth:`_orm.Session.get` also will perform a check if
+ the object is present in the identity map and
+ marked as expired - a SELECT
+ is emitted to refresh the object as well as to
+ ensure that the row is still present.
+ If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
+
+ :param entity: a mapped class or :class:`.Mapper` indicating the
+ type of entity to be loaded.
+
+ :param ident: A scalar, tuple, or dictionary representing the
+ primary key. For a composite (e.g. multiple column) primary key,
+ a tuple or dictionary should be passed.
+
+ For a single-column primary key, the scalar calling form is typically
+ the most expedient. If the primary key of a row is the value "5",
+ the call looks like::
+
+ my_object = session.get(SomeClass, 5)
+
+ The tuple form contains primary key values typically in
+ the order in which they correspond to the mapped
+ :class:`_schema.Table`
+ object's primary key columns, or if the
+ :paramref:`_orm.Mapper.primary_key` configuration parameter were
+ used, in
+ the order used for that parameter. For example, if the primary key
+ of a row is represented by the integer
+ digits "5, 10" the call would look like::
+
+ my_object = session.get(SomeClass, (5, 10))
+
+ The dictionary form should include as keys the mapped attribute names
+ corresponding to each element of the primary key. If the mapped class
+ has the attributes ``id``, ``version_id`` as the attributes which
+ store the object's primary key value, the call would look like::
+
+ my_object = session.get(SomeClass, {"id": 5, "version_id": 10})
+
+ :param options: optional sequence of loader options which will be
+ applied to the query, if one is emitted.
+
+ :param populate_existing: causes the method to unconditionally emit
+ a SQL query and refresh the object with the newly loaded data,
+ regardless of whether or not the object is already present.
+
+ :param with_for_update: optional boolean ``True`` indicating FOR UPDATE
+ should be used, or may be a dictionary containing flags to
+ indicate a more specific set of FOR UPDATE flags for the SELECT;
+ flags should match the parameters of
+ :meth:`_query.Query.with_for_update`.
+ Supersedes the :paramref:`.Session.refresh.lockmode` parameter.
+
+ :param execution_options: optional dictionary of execution options,
+ which will be associated with the query execution if one is emitted.
+ This dictionary can provide a subset of the options that are
+ accepted by :meth:`_engine.Connection.execution_options`, and may
+ also provide additional options understood only in an ORM context.
+
+ .. versionadded:: 1.4.29
+
+ .. seealso::
+
+ :ref:`orm_queryguide_execution_options` - ORM-specific execution
+ options
+
+ :return: The object instance, or ``None``.
+
+ """
+ return self._get_impl(
+ entity,
+ ident,
+ loading.load_on_pk_identity,
+ options,
+ populate_existing=populate_existing,
+ with_for_update=with_for_update,
+ identity_token=identity_token,
+ execution_options=execution_options,
+ )
+
+ def _get_impl(
+ self,
+ entity,
+ primary_key_identity,
+ db_load_fn,
+ options=None,
+ populate_existing=False,
+ with_for_update=None,
+ identity_token=None,
+ execution_options=None,
+ ):
+
+ # convert composite types to individual args
+ if hasattr(primary_key_identity, "__composite_values__"):
+ primary_key_identity = primary_key_identity.__composite_values__()
+
+ mapper = inspect(entity)
+
+ if not mapper or not mapper.is_mapper:
+ raise sa_exc.ArgumentError(
+ "Expected mapped class or mapper, got: %r" % entity
+ )
+
+ is_dict = isinstance(primary_key_identity, dict)
+ if not is_dict:
+ primary_key_identity = util.to_list(
+ primary_key_identity, default=(None,)
+ )
+
+ if len(primary_key_identity) != len(mapper.primary_key):
+ raise sa_exc.InvalidRequestError(
+ "Incorrect number of values in identifier to formulate "
+ "primary key for session.get(); primary key columns "
+ "are %s" % ",".join("'%s'" % c for c in mapper.primary_key)
+ )
+
+ if is_dict:
+ try:
+ primary_key_identity = list(
+ primary_key_identity[prop.key]
+ for prop in mapper._identity_key_props
+ )
+
+ except KeyError as err:
+ util.raise_(
+ sa_exc.InvalidRequestError(
+ "Incorrect names of values in identifier to formulate "
+ "primary key for session.get(); primary key attribute "
+ "names are %s"
+ % ",".join(
+ "'%s'" % prop.key
+ for prop in mapper._identity_key_props
+ )
+ ),
+ replace_context=err,
+ )
+
+ if (
+ not populate_existing
+ and not mapper.always_refresh
+ and with_for_update is None
+ ):
+
+ instance = self._identity_lookup(
+ mapper, primary_key_identity, identity_token=identity_token
+ )
+
+ if instance is not None:
+ # reject calls for id in identity map but class
+ # mismatch.
+ if not issubclass(instance.__class__, mapper.class_):
+ return None
+ return instance
+ elif instance is attributes.PASSIVE_CLASS_MISMATCH:
+ return None
+
+ # set_label_style() not strictly necessary, however this will ensure
+ # that tablename_colname style is used which at the moment is
+ # asserted in a lot of unit tests :)
+
+ load_options = context.QueryContext.default_load_options
+
+ if populate_existing:
+ load_options += {"_populate_existing": populate_existing}
+ statement = sql.select(mapper).set_label_style(
+ LABEL_STYLE_TABLENAME_PLUS_COL
+ )
+ if with_for_update is not None:
+ statement._for_update_arg = query.ForUpdateArg._from_argument(
+ with_for_update
+ )
+
+ if options:
+ statement = statement.options(*options)
+ if execution_options:
+ statement = statement.execution_options(**execution_options)
+ return db_load_fn(
+ self,
+ statement,
+ primary_key_identity,
+ load_options=load_options,
+ )
+
+ def merge(self, instance, load=True, options=None):
+ """Copy the state of a given instance into a corresponding instance
+ within this :class:`.Session`.
+
+ :meth:`.Session.merge` examines the primary key attributes of the
+ source instance, and attempts to reconcile it with an instance of the
+ same primary key in the session. If not found locally, it attempts
+ to load the object from the database based on primary key, and if
+ none can be located, creates a new instance. The state of each
+ attribute on the source instance is then copied to the target
+ instance. The resulting target instance is then returned by the
+ method; the original source instance is left unmodified, and
+ un-associated with the :class:`.Session` if not already.
+
+ This operation cascades to associated instances if the association is
+ mapped with ``cascade="merge"``.
+
+ See :ref:`unitofwork_merging` for a detailed discussion of merging.
+
+ .. versionchanged:: 1.1 - :meth:`.Session.merge` will now reconcile
+ pending objects with overlapping primary keys in the same way
+ as persistent. See :ref:`change_3601` for discussion.
+
+ :param instance: Instance to be merged.
+ :param load: Boolean, when False, :meth:`.merge` switches into
+ a "high performance" mode which causes it to forego emitting history
+ events as well as all database access. This flag is used for
+ cases such as transferring graphs of objects into a :class:`.Session`
+ from a second level cache, or to transfer just-loaded objects
+ into the :class:`.Session` owned by a worker thread or process
+ without re-querying the database.
+
+ The ``load=False`` use case adds the caveat that the given
+ object has to be in a "clean" state, that is, has no pending changes
+ to be flushed - even if the incoming object is detached from any
+ :class:`.Session`. This is so that when
+ the merge operation populates local attributes and
+ cascades to related objects and
+ collections, the values can be "stamped" onto the
+ target object as is, without generating any history or attribute
+ events, and without the need to reconcile the incoming data with
+ any existing related objects or collections that might not
+ be loaded. The resulting objects from ``load=False`` are always
+ produced as "clean", so it is only appropriate that the given objects
+ should be "clean" as well, else this suggests a mis-use of the
+ method.
+ :param options: optional sequence of loader options which will be
+ applied to the :meth:`_orm.Session.get` method when the merge
+ operation loads the existing version of the object from the database.
+
+ .. versionadded:: 1.4.24
+
+
+ .. seealso::
+
+ :func:`.make_transient_to_detached` - provides for an alternative
+ means of "merging" a single object into the :class:`.Session`
+
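+ E.g., a sketch that reconciles a detached object with the current
+ :class:`.Session`; ``detached_user`` is assumed::
+
+     merged_user = session.merge(detached_user)
+     assert merged_user in session
+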
+ """
+
+ if self._warn_on_events:
+ self._flush_warning("Session.merge()")
+
+ _recursive = {}
+ _resolve_conflict_map = {}
+
+ if load:
+ # flush current contents if we expect to load data
+ self._autoflush()
+
+ object_mapper(instance) # verify mapped
+ autoflush = self.autoflush
+ try:
+ self.autoflush = False
+ return self._merge(
+ attributes.instance_state(instance),
+ attributes.instance_dict(instance),
+ load=load,
+ options=options,
+ _recursive=_recursive,
+ _resolve_conflict_map=_resolve_conflict_map,
+ )
+ finally:
+ self.autoflush = autoflush
+
+ def _merge(
+ self,
+ state,
+ state_dict,
+ load=True,
+ options=None,
+ _recursive=None,
+ _resolve_conflict_map=None,
+ ):
+ mapper = _state_mapper(state)
+ if state in _recursive:
+ return _recursive[state]
+
+ new_instance = False
+ key = state.key
+
+ if key is None:
+ if state in self._new:
+ util.warn(
+ "Instance %s is already pending in this Session yet is "
+ "being merged again; this is probably not what you want "
+ "to do" % state_str(state)
+ )
+
+ if not load:
+ raise sa_exc.InvalidRequestError(
+ "merge() with load=False option does not support "
+ "objects transient (i.e. unpersisted) objects. flush() "
+ "all changes on mapped instances before merging with "
+ "load=False."
+ )
+ key = mapper._identity_key_from_state(state)
+ key_is_persistent = attributes.NEVER_SET not in key[1] and (
+ not _none_set.intersection(key[1])
+ or (
+ mapper.allow_partial_pks
+ and not _none_set.issuperset(key[1])
+ )
+ )
+ else:
+ key_is_persistent = True
+
+ if key in self.identity_map:
+ try:
+ merged = self.identity_map[key]
+ except KeyError:
+ # object was GC'ed right as we checked for it
+ merged = None
+ else:
+ merged = None
+
+ if merged is None:
+ if key_is_persistent and key in _resolve_conflict_map:
+ merged = _resolve_conflict_map[key]
+
+ elif not load:
+ if state.modified:
+ raise sa_exc.InvalidRequestError(
+ "merge() with load=False option does not support "
+ "objects marked as 'dirty'. flush() all changes on "
+ "mapped instances before merging with load=False."
+ )
+ merged = mapper.class_manager.new_instance()
+ merged_state = attributes.instance_state(merged)
+ merged_state.key = key
+ self._update_impl(merged_state)
+ new_instance = True
+
+ elif key_is_persistent:
+ merged = self.get(
+ mapper.class_,
+ key[1],
+ identity_token=key[2],
+ options=options,
+ )
+
+ if merged is None:
+ merged = mapper.class_manager.new_instance()
+ merged_state = attributes.instance_state(merged)
+ merged_dict = attributes.instance_dict(merged)
+ new_instance = True
+ self._save_or_update_state(merged_state)
+ else:
+ merged_state = attributes.instance_state(merged)
+ merged_dict = attributes.instance_dict(merged)
+
+ _recursive[state] = merged
+ _resolve_conflict_map[key] = merged
+
+ # check that we didn't just pull the exact same
+ # state out.
+ if state is not merged_state:
+ # version check if applicable
+ if mapper.version_id_col is not None:
+ existing_version = mapper._get_state_attr_by_column(
+ state,
+ state_dict,
+ mapper.version_id_col,
+ passive=attributes.PASSIVE_NO_INITIALIZE,
+ )
+
+ merged_version = mapper._get_state_attr_by_column(
+ merged_state,
+ merged_dict,
+ mapper.version_id_col,
+ passive=attributes.PASSIVE_NO_INITIALIZE,
+ )
+
+ if (
+ existing_version is not attributes.PASSIVE_NO_RESULT
+ and merged_version is not attributes.PASSIVE_NO_RESULT
+ and existing_version != merged_version
+ ):
+ raise exc.StaleDataError(
+ "Version id '%s' on merged state %s "
+ "does not match existing version '%s'. "
+ "Leave the version attribute unset when "
+ "merging to update the most recent version."
+ % (
+ existing_version,
+ state_str(merged_state),
+ merged_version,
+ )
+ )
+
+ merged_state.load_path = state.load_path
+ merged_state.load_options = state.load_options
+
+ # since we are copying load_options, we need to copy
+ # the callables_ that would have been generated by those
+ # load_options.
+ # assumes that the callables we put in state.callables_
+ # are not instance-specific (which they should not be)
+ merged_state._copy_callables(state)
+
+ for prop in mapper.iterate_properties:
+ prop.merge(
+ self,
+ state,
+ state_dict,
+ merged_state,
+ merged_dict,
+ load,
+ _recursive,
+ _resolve_conflict_map,
+ )
+
+ if not load:
+ # remove any history
+ merged_state._commit_all(merged_dict, self.identity_map)
+
+ if new_instance:
+ merged_state.manager.dispatch.load(merged_state, None)
+ return merged
+
+ def _validate_persistent(self, state):
+ if not self.identity_map.contains_state(state):
+ raise sa_exc.InvalidRequestError(
+ "Instance '%s' is not persistent within this Session"
+ % state_str(state)
+ )
+
+ def _save_impl(self, state):
+ if state.key is not None:
+ raise sa_exc.InvalidRequestError(
+ "Object '%s' already has an identity - "
+ "it can't be registered as pending" % state_str(state)
+ )
+
+ obj = state.obj()
+ to_attach = self._before_attach(state, obj)
+ if state not in self._new:
+ self._new[state] = obj
+ state.insert_order = len(self._new)
+ if to_attach:
+ self._after_attach(state, obj)
+
+ def _update_impl(self, state, revert_deletion=False):
+ if state.key is None:
+ raise sa_exc.InvalidRequestError(
+ "Instance '%s' is not persisted" % state_str(state)
+ )
+
+ if state._deleted:
+ if revert_deletion:
+ if not state._attached:
+ return
+ del state._deleted
+ else:
+ raise sa_exc.InvalidRequestError(
+ "Instance '%s' has been deleted. "
+ "Use the make_transient() "
+ "function to send this object back "
+ "to the transient state." % state_str(state)
+ )
+
+ obj = state.obj()
+
+ # check for late gc
+ if obj is None:
+ return
+
+ to_attach = self._before_attach(state, obj)
+
+ self._deleted.pop(state, None)
+ if revert_deletion:
+ self.identity_map.replace(state)
+ else:
+ self.identity_map.add(state)
+
+ if to_attach:
+ self._after_attach(state, obj)
+ elif revert_deletion:
+ self.dispatch.deleted_to_persistent(self, state)
+
+ def _save_or_update_impl(self, state):
+ if state.key is None:
+ self._save_impl(state)
+ else:
+ self._update_impl(state)
+
+ def enable_relationship_loading(self, obj):
+ """Associate an object with this :class:`.Session` for related
+ object loading.
+
+ .. warning::
+
+ :meth:`.enable_relationship_loading` exists to serve special
+ use cases and is not recommended for general use.
+
+ Accesses of attributes mapped with :func:`_orm.relationship`
+ will attempt to load a value from the database using this
+ :class:`.Session` as the source of connectivity. The values
+ will be loaded based on foreign key and primary key values
+ present on this object - if not present, then those relationships
+ will be unavailable.
+
+ The object will be attached to this session, but will
+ **not** participate in any persistence operations; its state
+ for almost all purposes will remain either "transient" or
+ "detached", except for the case of relationship loading.
+
+ Also note that backrefs will often not work as expected.
+ Altering a relationship-bound attribute on the target object
+ may not fire off a backref event, if the effective value
+ is what was already loaded from a foreign-key-holding value.
+
+ The :meth:`.Session.enable_relationship_loading` method is
+ similar to the ``load_on_pending`` flag on :func:`_orm.relationship`.
+ Unlike that flag, :meth:`.Session.enable_relationship_loading` allows
+ an object to remain transient while still being able to load
+ related items.
+
+ To make a transient object associated with a :class:`.Session`
+ via :meth:`.Session.enable_relationship_loading` pending, add
+ it to the :class:`.Session` using :meth:`.Session.add` normally.
+ If the object instead represents an existing identity in the database,
+ it should be merged using :meth:`.Session.merge`.
+
+ :meth:`.Session.enable_relationship_loading` does not improve
+ behavior when the ORM is used normally - object references should be
+ constructed at the object level, not at the foreign key level, so
+ that they are present in an ordinary way before flush()
+ proceeds. This method is not intended for general use.
+
+ .. seealso::
+
+ :paramref:`_orm.relationship.load_on_pending` - this flag
+ allows per-relationship loading of many-to-ones on items that
+ are pending.
+
+ :func:`.make_transient_to_detached` - allows for an object to
+ be added to a :class:`.Session` without SQL emitted, which then
+ will unexpire attributes on access.
+
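+ As a brief illustration, assuming a hypothetical ``Order`` class with
+ a many-to-one ``customer`` relationship against a ``customer_id``
+ foreign key column (all names illustrative)::
+
+ order = Order()
+ order.customer_id = 5
+
+ # remain transient, but allow relationship loads via this Session
+ session.enable_relationship_loading(order)
+
+ # emits a SELECT for the Customer with primary key 5
+ customer = order.customer
+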
+ """
+ try:
+ state = attributes.instance_state(obj)
+ except exc.NO_STATE as err:
+ util.raise_(
+ exc.UnmappedInstanceError(obj),
+ replace_context=err,
+ )
+
+ to_attach = self._before_attach(state, obj)
+ state._load_pending = True
+ if to_attach:
+ self._after_attach(state, obj)
+
+ def _before_attach(self, state, obj):
+ self._autobegin()
+
+ if state.session_id == self.hash_key:
+ return False
+
+ if state.session_id and state.session_id in _sessions:
+ raise sa_exc.InvalidRequestError(
+ "Object '%s' is already attached to session '%s' "
+ "(this is '%s')"
+ % (state_str(state), state.session_id, self.hash_key)
+ )
+
+ self.dispatch.before_attach(self, state)
+
+ return True
+
+ def _after_attach(self, state, obj):
+ state.session_id = self.hash_key
+ if state.modified and state._strong_obj is None:
+ state._strong_obj = obj
+ self.dispatch.after_attach(self, state)
+
+ if state.key:
+ self.dispatch.detached_to_persistent(self, state)
+ else:
+ self.dispatch.transient_to_pending(self, state)
+
+ def __contains__(self, instance):
+ """Return True if the instance is associated with this session.
+
+ A result of True indicates that the instance is either pending or
+ persistent within this Session.
+
+ """
+ try:
+ state = attributes.instance_state(instance)
+ except exc.NO_STATE as err:
+ util.raise_(
+ exc.UnmappedInstanceError(instance),
+ replace_context=err,
+ )
+ return self._contains_state(state)
+
+ def __iter__(self):
+ """Iterate over all pending or persistent instances within this
+ Session.
+
+ """
+ return iter(
+ list(self._new.values()) + list(self.identity_map.values())
+ )
+
+ def _contains_state(self, state):
+ return state in self._new or self.identity_map.contains_state(state)
+
+ def flush(self, objects=None):
+ """Flush all the object changes to the database.
+
+ Writes out all pending object creations, deletions and modifications
+ to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are
+ automatically ordered by the Session's unit of work dependency
+ solver.
+
+ Database operations will be issued in the current transactional
+ context and do not affect the state of the transaction, unless an
+ error occurs, in which case the entire transaction is rolled back.
+ You may flush() as often as you like within a transaction to move
+ changes from Python to the database's transaction buffer.
+
+ For ``autocommit`` Sessions with no active manual transaction, flush()
+ will create a transaction on the fly that surrounds the entire set of
+ operations within the flush.
+
+ :param objects: Optional; restricts the flush operation to operate
+ only on elements that are in the given collection.
+
+ This feature is for an extremely narrow set of use cases where
+ particular objects may need to be operated upon before the
+ full flush() occurs. It is not intended for general use.
+
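+ E.g., a common pattern is to flush mid-transaction so that newly
+ generated primary key values become available; ``User`` below is a
+ hypothetical mapped class::
+
+ user = User(name="sandy")
+ session.add(user)
+
+ # emits the INSERT; the transaction remains open until
+ # commit() or rollback()
+ session.flush()
+
+ # the server-generated primary key is now available
+ print(user.id)
+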
+ """
+
+ if self._flushing:
+ raise sa_exc.InvalidRequestError("Session is already flushing")
+
+ if self._is_clean():
+ return
+ try:
+ self._flushing = True
+ self._flush(objects)
+ finally:
+ self._flushing = False
+
+ def _flush_warning(self, method):
+ util.warn(
+ "Usage of the '%s' operation is not currently supported "
+ "within the execution stage of the flush process. "
+ "Results may not be consistent. Consider using alternative "
+ "event listeners or connection-level operations instead." % method
+ )
+
+ def _is_clean(self):
+ return (
+ not self.identity_map.check_modified()
+ and not self._deleted
+ and not self._new
+ )
+
+ def _flush(self, objects=None):
+
+ dirty = self._dirty_states
+ if not dirty and not self._deleted and not self._new:
+ self.identity_map._modified.clear()
+ return
+
+ flush_context = UOWTransaction(self)
+
+ if self.dispatch.before_flush:
+ self.dispatch.before_flush(self, flush_context, objects)
+ # re-establish "dirty states" in case the listeners
+ # added
+ dirty = self._dirty_states
+
+ deleted = set(self._deleted)
+ new = set(self._new)
+
+ dirty = set(dirty).difference(deleted)
+
+ # create the set of all objects we want to operate upon
+ if objects:
+ # specific list passed in
+ objset = set()
+ for o in objects:
+ try:
+ state = attributes.instance_state(o)
+
+ except exc.NO_STATE as err:
+ util.raise_(
+ exc.UnmappedInstanceError(o),
+ replace_context=err,
+ )
+ objset.add(state)
+ else:
+ objset = None
+
+ # store objects whose fate has been decided
+ processed = set()
+
+ # put all saves/updates into the flush context. detect top-level
+ # orphans and throw them into deleted.
+ if objset:
+ proc = new.union(dirty).intersection(objset).difference(deleted)
+ else:
+ proc = new.union(dirty).difference(deleted)
+
+ for state in proc:
+ is_orphan = _state_mapper(state)._is_orphan(state)
+
+ is_persistent_orphan = is_orphan and state.has_identity
+
+ if (
+ is_orphan
+ and not is_persistent_orphan
+ and state._orphaned_outside_of_session
+ ):
+ self._expunge_states([state])
+ else:
+ _reg = flush_context.register_object(
+ state, isdelete=is_persistent_orphan
+ )
+ assert _reg, "Failed to add object to the flush context!"
+ processed.add(state)
+
+ # put all remaining deletes into the flush context.
+ if objset:
+ proc = deleted.intersection(objset).difference(processed)
+ else:
+ proc = deleted.difference(processed)
+ for state in proc:
+ _reg = flush_context.register_object(state, isdelete=True)
+ assert _reg, "Failed to add object to the flush context!"
+
+ if not flush_context.has_work:
+ return
+
+ flush_context.transaction = transaction = self.begin(_subtrans=True)
+ try:
+ self._warn_on_events = True
+ try:
+ flush_context.execute()
+ finally:
+ self._warn_on_events = False
+
+ self.dispatch.after_flush(self, flush_context)
+
+ flush_context.finalize_flush_changes()
+
+ if not objects and self.identity_map._modified:
+ len_ = len(self.identity_map._modified)
+
+ statelib.InstanceState._commit_all_states(
+ [
+ (state, state.dict)
+ for state in self.identity_map._modified
+ ],
+ instance_dict=self.identity_map,
+ )
+ util.warn(
+ "Attribute history events accumulated on %d "
+ "previously clean instances "
+ "within inner-flush event handlers have been "
+ "reset, and will not result in database updates. "
+ "Consider using set_committed_value() within "
+ "inner-flush event handlers to avoid this warning." % len_
+ )
+
+ # useful assertions:
+ # if not objects:
+ # assert not self.identity_map._modified
+ # else:
+ # assert self.identity_map._modified == \
+ # self.identity_map._modified.difference(objects)
+
+ self.dispatch.after_flush_postexec(self, flush_context)
+
+ transaction.commit()
+
+ except:
+ with util.safe_reraise():
+ transaction.rollback(_capture_exception=True)
+
+ def bulk_save_objects(
+ self,
+ objects,
+ return_defaults=False,
+ update_changed_only=True,
+ preserve_order=True,
+ ):
+ """Perform a bulk save of the given list of objects.
+
+ The bulk save feature allows mapped objects to be used as the
+ source of simple INSERT and UPDATE operations which can be more easily
+ grouped together into higher performing "executemany"
+ operations; the extraction of data from the objects is also performed
+ using a lower-latency process that ignores whether or not attributes
+ have actually been modified in the case of UPDATEs, and also ignores
+ SQL expressions.
+
+ The objects as given are not added to the session and no additional
+ state is established on them. If the
+ :paramref:`_orm.Session.bulk_save_objects.return_defaults` flag is set,
+ then server-generated primary key values will be assigned to the
+ returned objects, but **not server side defaults**; this is a
+ limitation in the implementation. If stateful objects are desired,
+ please use the standard :meth:`_orm.Session.add_all` approach, or
+ alternatively newer mass-insert features such as
+ :ref:`orm_dml_returning_objects`.
+
+ .. warning::
+
+ The bulk save feature allows for a lower-latency INSERT/UPDATE
+ of rows at the expense of most other unit-of-work features.
+ Features such as object management, relationship handling,
+ and SQL clause support are **silently omitted** in favor of raw
+ INSERT/UPDATES of records.
+
+ Please note that newer versions of SQLAlchemy are **greatly
+ improving the efficiency** of the standard flush process. It is
+ **strongly recommended** to not use the bulk methods as they
+ represent a forking of SQLAlchemy's functionality and are slowly
+ being moved into legacy status. New features such as
+ :ref:`orm_dml_returning_objects` are both more efficient than
+ the "bulk" methods and provide more predictable functionality.
+
+ **Please read the list of caveats at**
+ :ref:`bulk_operations_caveats` **before using this method, and
+ fully test and confirm the functionality of all code developed
+ using these systems.**
+
+ :param objects: a sequence of mapped object instances. The mapped
+ objects are persisted as is, and are **not** associated with the
+ :class:`.Session` afterwards.
+
+ For each object, whether the object is sent as an INSERT or an
+ UPDATE is dependent on the same rules used by the :class:`.Session`
+ in traditional operation; if the object has the
+ :attr:`.InstanceState.key`
+ attribute set, then the object is assumed to be "detached" and
+ will result in an UPDATE. Otherwise, an INSERT is used.
+
+ In the case of an UPDATE, statements are grouped according to which
+ attributes have changed, since those attributes form the subject of
+ each SET clause. If ``update_changed_only`` is False, then all
+ attributes present within each object are applied to the UPDATE
+ statement, which may help in allowing the statements to be grouped
+ together into a larger executemany(), and will also reduce the
+ overhead of checking history on attributes.
+
+ :param return_defaults: when True, rows that are missing values which
+ generate defaults, namely integer primary key defaults and sequences,
+ will be inserted **one at a time**, so that the primary key value
+ is available. In particular this will allow joined-inheritance
+ and other multi-table mappings to insert correctly without the need
+ to provide primary key values ahead of time; however,
+ :paramref:`.Session.bulk_save_objects.return_defaults` **greatly
+ reduces the performance gains** of the method overall. It is strongly
+ advised to use the standard :meth:`_orm.Session.add_all`
+ approach.
+
+ :param update_changed_only: when True, UPDATE statements are rendered
+ based on those attributes in each state that have logged changes.
+ When False, all attributes present are rendered into the SET clause
+ with the exception of primary key attributes.
+
+ :param preserve_order: when True, the order of inserts and updates
+ matches exactly the order in which the objects are given. When
+ False, common types of objects are grouped into inserts
+ and updates, to allow for more batching opportunities.
+
+ .. versionadded:: 1.3
+
+ .. seealso::
+
+ :ref:`bulk_operations`
+
+ :meth:`.Session.bulk_insert_mappings`
+
+ :meth:`.Session.bulk_update_mappings`
+
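+ As a minimal sketch, assuming a hypothetical ``User`` mapped class, a
+ series of transient objects may be INSERTed using "executemany"::
+
+ session.bulk_save_objects(
+ [User(name="u%d" % i) for i in range(1000)]
+ )
+ session.commit()
+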
+ """
+
+ obj_states = (attributes.instance_state(obj) for obj in objects)
+
+ if not preserve_order:
+ # the purpose of this sort is just so that common mappers
+ # and persistence states are grouped together, so that groupby
+ # will return a single group for a particular type of mapper.
+ # it's not trying to be deterministic beyond that.
+ obj_states = sorted(
+ obj_states,
+ key=lambda state: (id(state.mapper), state.key is not None),
+ )
+
+ def grouping_key(state):
+ return (state.mapper, state.key is not None)
+
+ for (mapper, isupdate), states in itertools.groupby(
+ obj_states, grouping_key
+ ):
+ self._bulk_save_mappings(
+ mapper,
+ states,
+ isupdate,
+ True,
+ return_defaults,
+ update_changed_only,
+ False,
+ )
+
+ def bulk_insert_mappings(
+ self, mapper, mappings, return_defaults=False, render_nulls=False
+ ):
+ """Perform a bulk insert of the given list of mapping dictionaries.
+
+ The bulk insert feature allows plain Python dictionaries to be used as
+ the source of simple INSERT operations which can be more easily
+ grouped together into higher performing "executemany"
+ operations. Because plain dictionaries are used, no "history" or session
+ state management features are in use, reducing latency when inserting
+ large numbers of simple rows.
+
+ The values within the dictionaries as given are typically passed
+ without modification into Core :class:`_expression.Insert` constructs,
+ after
+ organizing the values within them across the tables to which
+ the given mapper is mapped.
+
+ .. versionadded:: 1.0.0
+
+ .. warning::
+
+ The bulk insert feature allows for a lower-latency INSERT
+ of rows at the expense of most other unit-of-work features.
+ Features such as object management, relationship handling,
+ and SQL clause support are **silently omitted** in favor of raw
+ INSERT of records.
+
+ Please note that newer versions of SQLAlchemy are **greatly
+ improving the efficiency** of the standard flush process. It is
+ **strongly recommended** to not use the bulk methods as they
+ represent a forking of SQLAlchemy's functionality and are slowly
+ being moved into legacy status. New features such as
+ :ref:`orm_dml_returning_objects` are both more efficient than
+ the "bulk" methods and provide more predictable functionality.
+
+ **Please read the list of caveats at**
+ :ref:`bulk_operations_caveats` **before using this method, and
+ fully test and confirm the functionality of all code developed
+ using these systems.**
+
+ :param mapper: a mapped class, or the actual :class:`_orm.Mapper`
+ object,
+ representing the single kind of object represented within the mapping
+ list.
+
+ :param mappings: a sequence of dictionaries, each one containing the
+ state of the mapped row to be inserted, in terms of the attribute
+ names on the mapped class. If the mapping refers to multiple tables,
+ such as a joined-inheritance mapping, each dictionary must contain all
+ keys to be populated into all tables.
+
+ :param return_defaults: when True, rows that are missing values which
+ generate defaults, namely integer primary key defaults and sequences,
+ will be inserted **one at a time**, so that the primary key value
+ is available. In particular this will allow joined-inheritance
+ and other multi-table mappings to insert correctly without the need
+ to provide primary
+ key values ahead of time; however,
+ :paramref:`.Session.bulk_insert_mappings.return_defaults`
+ **greatly reduces the performance gains** of the method overall.
+ If the rows
+ to be inserted only refer to a single table, then there is no
+ reason this flag should be set as the returned default information
+ is not used.
+
+ :param render_nulls: When True, a value of ``None`` will result
+ in a NULL value being included in the INSERT statement, rather
+ than the column being omitted from the INSERT. This allows all
+ the rows being INSERTed to have the identical set of columns which
+ allows the full set of rows to be batched to the DBAPI. Normally,
+ each column-set that contains a different combination of NULL values
+ than the previous row must omit a different series of columns from
+ the rendered INSERT statement, which means it must be emitted as a
+ separate statement. By passing this flag, the full set of rows
+ are guaranteed to be batchable into one batch; the cost however is
+ that server-side defaults which are invoked by an omitted column will
+ be skipped, so care must be taken to ensure that these are not
+ necessary.
+
+ .. warning::
+
+ When this flag is set, **server side default SQL values will
+ not be invoked** for those columns that are inserted as NULL;
+ the NULL value will be sent explicitly. Care must be taken
+ to ensure that no server-side default functions need to be
+ invoked for the operation as a whole.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`bulk_operations`
+
+ :meth:`.Session.bulk_save_objects`
+
+ :meth:`.Session.bulk_update_mappings`
+
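+ As a minimal sketch, assuming a hypothetical ``User`` mapped class
+ with ``id`` and ``name`` columns, where primary key values are
+ supplied up front so that ``return_defaults`` is not needed::
+
+ session.bulk_insert_mappings(
+ User,
+ [{"id": i, "name": "u%d" % i} for i in range(1000)],
+ )
+ session.commit()
+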
+ """
+ self._bulk_save_mappings(
+ mapper,
+ mappings,
+ False,
+ False,
+ return_defaults,
+ False,
+ render_nulls,
+ )
+
+ def bulk_update_mappings(self, mapper, mappings):
+ """Perform a bulk update of the given list of mapping dictionaries.
+
+ The bulk update feature allows plain Python dictionaries to be used as
+ the source of simple UPDATE operations which can be more easily
+ grouped together into higher performing "executemany"
+ operations. Because plain dictionaries are used, no "history" or session
+ state management features are in use, reducing latency when updating
+ large numbers of simple rows.
+
+ .. versionadded:: 1.0.0
+
+ .. warning::
+
+ The bulk update feature allows for a lower-latency UPDATE
+ of rows at the expense of most other unit-of-work features.
+ Features such as object management, relationship handling,
+ and SQL clause support are **silently omitted** in favor of raw
+ UPDATES of records.
+
+ Please note that newer versions of SQLAlchemy are **greatly
+ improving the efficiency** of the standard flush process. It is
+ **strongly recommended** to not use the bulk methods as they
+ represent a forking of SQLAlchemy's functionality and are slowly
+ being moved into legacy status. New features such as
+ :ref:`orm_dml_returning_objects` are both more efficient than
+ the "bulk" methods and provide more predictable functionality.
+
+ **Please read the list of caveats at**
+ :ref:`bulk_operations_caveats` **before using this method, and
+ fully test and confirm the functionality of all code developed
+ using these systems.**
+
+ :param mapper: a mapped class, or the actual :class:`_orm.Mapper`
+ object,
+ representing the single kind of object represented within the mapping
+ list.
+
+ :param mappings: a sequence of dictionaries, each one containing the
+ state of the mapped row to be updated, in terms of the attribute names
+ on the mapped class. If the mapping refers to multiple tables, such
+ as a joined-inheritance mapping, each dictionary may contain keys
+ corresponding to all tables. All those keys which are present and
+ are not part of the primary key are applied to the SET clause of the
+ UPDATE statement; the primary key values, which are required, are
+ applied to the WHERE clause.
+
+
+ .. seealso::
+
+ :ref:`bulk_operations`
+
+ :meth:`.Session.bulk_insert_mappings`
+
+ :meth:`.Session.bulk_save_objects`
+
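+ As a minimal sketch, again assuming a hypothetical ``User`` mapped
+ class where ``id`` is the primary key; the ``id`` values are applied
+ to the WHERE clause and the remaining keys to the SET clause::
+
+ session.bulk_update_mappings(
+ User,
+ [
+ {"id": 1, "name": "new name 1"},
+ {"id": 2, "name": "new name 2"},
+ ],
+ )
+ session.commit()
+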
+ """
+ self._bulk_save_mappings(
+ mapper, mappings, True, False, False, False, False
+ )
+
+ def _bulk_save_mappings(
+ self,
+ mapper,
+ mappings,
+ isupdate,
+ isstates,
+ return_defaults,
+ update_changed_only,
+ render_nulls,
+ ):
+ mapper = _class_to_mapper(mapper)
+ self._flushing = True
+
+ transaction = self.begin(_subtrans=True)
+ try:
+ if isupdate:
+ persistence._bulk_update(
+ mapper,
+ mappings,
+ transaction,
+ isstates,
+ update_changed_only,
+ )
+ else:
+ persistence._bulk_insert(
+ mapper,
+ mappings,
+ transaction,
+ isstates,
+ return_defaults,
+ render_nulls,
+ )
+ transaction.commit()
+
+ except:
+ with util.safe_reraise():
+ transaction.rollback(_capture_exception=True)
+ finally:
+ self._flushing = False
+
+ def is_modified(self, instance, include_collections=True):
+ r"""Return ``True`` if the given instance has locally
+ modified attributes.
+
+ This method retrieves the history for each instrumented
+ attribute on the instance and performs a comparison of the current
+ value to its previously committed value, if any.
+
+ It is in effect a more expensive and accurate
+ version of checking for the given instance in the
+ :attr:`.Session.dirty` collection; a full test for
+ each attribute's net "dirty" status is performed.
+
+ E.g.::
+
+ return session.is_modified(someobject)
+
+ A few caveats to this method apply:
+
+ * Instances present in the :attr:`.Session.dirty` collection may
+ report ``False`` when tested with this method. This is because
+ the object may have received change events via attribute mutation,
+ thus placing it in :attr:`.Session.dirty`, but ultimately the state
+ is the same as that loaded from the database, resulting in no net
+ change here.
+ * Scalar attributes may not have recorded the previously set
+ value when a new value was applied, if the attribute was not loaded,
+ or was expired, at the time the new value was received - in these
+ cases, the attribute is assumed to have a change, even if there is
+ ultimately no net change against its database value. SQLAlchemy in
+ most cases does not need the "old" value when a set event occurs, so
+ it skips the expense of a SQL call if the old value isn't present,
+ based on the assumption that an UPDATE of the scalar value is
+ usually needed, and in those few cases where it isn't, this is on
+ average less expensive than issuing a defensive SELECT.
+
+ The "old" value is fetched unconditionally upon set only if the
+ attribute container has the ``active_history`` flag set to ``True``.
+ This flag is set typically for primary key attributes and scalar
+ object references that are not a simple many-to-one. To set this
+ flag for any arbitrary mapped column, use the ``active_history``
+ argument with :func:`.column_property`.
+
+ :param instance: mapped instance to be tested for pending changes.
+ :param include_collections: Indicates if multivalued collections
+ should be included in the operation. Setting this to ``False`` is a
+ way to detect only local-column based properties (i.e. scalar columns
+ or many-to-one foreign keys) that would result in an UPDATE for this
+ instance upon flush.
+
+ """
+ state = object_state(instance)
+
+ if not state.modified:
+ return False
+
+ dict_ = state.dict
+
+ for attr in state.manager.attributes:
+ if (
+ not include_collections
+ and hasattr(attr.impl, "get_collection")
+ ) or not hasattr(attr.impl, "get_history"):
+ continue
+
+ (added, unchanged, deleted) = attr.impl.get_history(
+ state, dict_, passive=attributes.NO_CHANGE
+ )
+
+ if added or deleted:
+ return True
+ else:
+ return False
+
+ @property
+ def is_active(self):
+ """True if this :class:`.Session` not in "partial rollback" state.
+
+ .. versionchanged:: 1.4 The :class:`_orm.Session` no longer begins
+ a new transaction immediately, so this attribute will be False
+ when the :class:`_orm.Session` is first instantiated.
+
+ "partial rollback" state typically indicates that the flush process
+ of the :class:`_orm.Session` has failed, and that the
+ :meth:`_orm.Session.rollback` method must be emitted in order to
+ fully roll back the transaction.
+
+ If this :class:`_orm.Session` is not in a transaction at all, the
+ :class:`_orm.Session` will autobegin when it is first used, so in this
+ case :attr:`_orm.Session.is_active` will return True.
+
+ Otherwise, if this :class:`_orm.Session` is within a transaction,
+ and that transaction has not been rolled back internally, the
+ :attr:`_orm.Session.is_active` will also return True.
+
+ .. seealso::
+
+ :ref:`faq_session_rollback`
+
+ :meth:`_orm.Session.in_transaction`
+
+ """
+ if self.autocommit:
+ return (
+ self._transaction is not None and self._transaction.is_active
+ )
+ else:
+ return self._transaction is None or self._transaction.is_active
+
+ identity_map = None
+ """A mapping of object identities to objects themselves.
+
+ Iterating through ``Session.identity_map.values()`` provides
+ access to the full set of persistent objects (i.e., those
+ that have row identity) currently in the session.
+
+ .. seealso::
+
+ :func:`.identity_key` - helper function to produce the keys used
+ in this dictionary.
+
+ """
+
+ @property
+ def _dirty_states(self):
+ """The set of all persistent states considered dirty.
+
+ This includes all states that were modified, including those
+ that may also have been marked as deleted.
+
+ """
+ return self.identity_map._dirty_states()
+
+ @property
+ def dirty(self):
+ """The set of all persistent instances considered dirty.
+
+ E.g.::
+
+ some_mapped_object in session.dirty
+
+ Instances are considered dirty when they were modified but not
+ deleted.
+
+ Note that this 'dirty' calculation is 'optimistic'; most
+ attribute-setting or collection modification operations will
+ mark an instance as 'dirty' and place it in this set, even if
+ there is no net change to the attribute's value. At flush
+ time, the value of each attribute is compared to its
+ previously saved value, and if there's no net change, no SQL
+ operation will occur (this is a more expensive operation so
+ it's only done at flush time).
+
+ To check if an instance has actionable net changes to its
+ attributes, use the :meth:`.Session.is_modified` method.
+
+ """
+ return util.IdentitySet(
+ [
+ state.obj()
+ for state in self._dirty_states
+ if state not in self._deleted
+ ]
+ )
+
+ @property
+ def deleted(self):
+ "The set of all instances marked as 'deleted' within this ``Session``"
+
+ return util.IdentitySet(list(self._deleted.values()))
+
+ @property
+ def new(self):
+ "The set of all instances marked as 'new' within this ``Session``."
+
+ return util.IdentitySet(list(self._new.values()))
+
+
+class sessionmaker(_SessionClassMethods):
+ """A configurable :class:`.Session` factory.
+
+ The :class:`.sessionmaker` factory generates new
+ :class:`.Session` objects when called, creating them given
+ the configurational arguments established here.
+
+ e.g.::
+
+ from sqlalchemy import create_engine
+ from sqlalchemy.orm import sessionmaker
+
+ # an Engine, which the Session will use for connection
+ # resources
+ engine = create_engine('postgresql://scott:tiger@localhost/')
+
+ Session = sessionmaker(engine)
+
+ with Session() as session:
+ session.add(some_object)
+ session.add(some_other_object)
+ session.commit()
+
+ Context manager use is optional; otherwise, the returned
+ :class:`_orm.Session` object may be closed explicitly via the
+ :meth:`_orm.Session.close` method. Using a
+ ``try:/finally:`` block is optional; however, it will ensure that the close
+ takes place even if there are database errors::
+
+ session = Session()
+ try:
+ session.add(some_object)
+ session.add(some_other_object)
+ session.commit()
+ finally:
+ session.close()
+
+ :class:`.sessionmaker` acts as a factory for :class:`_orm.Session`
+ objects in the same way as an :class:`_engine.Engine` acts as a factory
+ for :class:`_engine.Connection` objects. In this way it also includes
+ a :meth:`_orm.sessionmaker.begin` method, that provides a context
+ manager which both begins and commits a transaction, as well as closes
+ out the :class:`_orm.Session` when complete, rolling back the transaction
+ if any errors occur::
+
+ Session = sessionmaker(engine)
+
+ with Session.begin() as session:
+ session.add(some_object)
+ session.add(some_other_object)
+ # commits transaction, closes session
+
+ .. versionadded:: 1.4
+
+ When calling upon :class:`_orm.sessionmaker` to construct a
+ :class:`_orm.Session`, keyword arguments may also be passed to the
+ method; these arguments will override that of the globally configured
+ parameters. Below we use a :class:`_orm.sessionmaker` bound to a certain
+ :class:`_engine.Engine` to produce a :class:`_orm.Session` that is instead
+ bound to a specific :class:`_engine.Connection` procured from that engine::
+
+ Session = sessionmaker(engine)
+
+ # bind an individual session to a connection
+
+ with engine.connect() as connection:
+ with Session(bind=connection) as session:
+ # work with session
+
+ The class also includes a method :meth:`_orm.sessionmaker.configure`, which
+ can be used to specify additional keyword arguments to the factory, which
+ will take effect for subsequent :class:`.Session` objects generated. This
+ is usually used to associate one or more :class:`_engine.Engine` objects
+ with an existing
+ :class:`.sessionmaker` factory before it is first used::
+
+ # application starts, sessionmaker does not have
+ # an engine bound yet
+ Session = sessionmaker()
+
+ # ... later, when an engine URL is read from a configuration
+ # file or other events allow the engine to be created
+ engine = create_engine('sqlite:///foo.db')
+ Session.configure(bind=engine)
+
+ sess = Session()
+ # work with session
+
+ .. seealso::
+
+ :ref:`session_getting` - introductory text on creating
+ sessions using :class:`.sessionmaker`.
+
+ """
+
+ def __init__(
+ self,
+ bind=None,
+ class_=Session,
+ autoflush=True,
+ autocommit=False,
+ expire_on_commit=True,
+ info=None,
+ **kw
+ ):
+ r"""Construct a new :class:`.sessionmaker`.
+
+ All arguments here except for ``class_`` correspond to arguments
+ accepted by :class:`.Session` directly. See the
+ :meth:`.Session.__init__` docstring for more details on parameters.
+
+ :param bind: a :class:`_engine.Engine` or other :class:`.Connectable`
+ with
+ which newly created :class:`.Session` objects will be associated.
+ :param class\_: class to use in order to create new :class:`.Session`
+ objects. Defaults to :class:`.Session`.
+ :param autoflush: The autoflush setting to use with newly created
+ :class:`.Session` objects.
+ :param autocommit: The autocommit setting to use with newly created
+ :class:`.Session` objects.
+ :param expire_on_commit=True: the
+ :paramref:`_orm.Session.expire_on_commit` setting to use
+ with newly created :class:`.Session` objects.
+
+ :param info: optional dictionary of information that will be available
+ via :attr:`.Session.info`. Note this dictionary is *updated*, not
+ replaced, when the ``info`` parameter is specified to the specific
+ :class:`.Session` construction operation.
+
+ :param \**kw: all other keyword arguments are passed to the
+ constructor of newly created :class:`.Session` objects.
+
+ """
+ kw["bind"] = bind
+ kw["autoflush"] = autoflush
+ kw["autocommit"] = autocommit
+ kw["expire_on_commit"] = expire_on_commit
+ if info is not None:
+ kw["info"] = info
+ self.kw = kw
+ # make our own subclass of the given class, so that
+ # events can be associated with it specifically.
+ self.class_ = type(class_.__name__, (class_,), {})
+
+ def begin(self):
+ """Produce a context manager that both provides a new
+ :class:`_orm.Session` as well as a transaction that commits.
+
+
+ e.g.::
+
+ Session = sessionmaker(some_engine)
+
+ with Session.begin() as session:
+ session.add(some_object)
+
+ # commits transaction, closes session
+
+ .. versionadded:: 1.4
+
+
+ """
+
+ session = self()
+ return session._maker_context_manager()
+
+ def __call__(self, **local_kw):
+ """Produce a new :class:`.Session` object using the configuration
+ established in this :class:`.sessionmaker`.
+
+ In Python, the ``__call__`` method is invoked on an object when
+ it is "called" in the same way as a function::
+
+ Session = sessionmaker()
+ session = Session() # invokes sessionmaker.__call__()
+
+ """
+ for k, v in self.kw.items():
+ if k == "info" and "info" in local_kw:
+ d = v.copy()
+ d.update(local_kw["info"])
+ local_kw["info"] = d
+ else:
+ local_kw.setdefault(k, v)
+ return self.class_(**local_kw)
+
+ def configure(self, **new_kw):
+ """(Re)configure the arguments for this sessionmaker.
+
+ e.g.::
+
+ Session = sessionmaker()
+
+ Session.configure(bind=create_engine('sqlite://'))
+ """
+ self.kw.update(new_kw)
+
+ def __repr__(self):
+ return "%s(class_=%r, %s)" % (
+ self.__class__.__name__,
+ self.class_.__name__,
+ ", ".join("%s=%r" % (k, v) for k, v in self.kw.items()),
+ )
+
+
+def close_all_sessions():
+ """Close all sessions in memory.
+
+ This function consults a global registry of all :class:`.Session` objects
+ and calls :meth:`.Session.close` on them, which resets them to a clean
+ state.
+
+ This function is not for general use but may be useful for test suites
+ within the teardown scheme.
+
+ .. versionadded:: 1.3
+
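+ E.g., a sketch of a test-suite teardown hook; the fixture style shown
+ is illustrative only::
+
+ from sqlalchemy.orm import close_all_sessions
+
+ def teardown():
+ close_all_sessions()
+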
+ """
+
+ for sess in _sessions.values():
+ sess.close()
+
+
+def make_transient(instance):
+ """Alter the state of the given instance so that it is :term:`transient`.
+
+ .. note::
+
+ :func:`.make_transient` is a special-case function for
+ advanced use cases only.
+
+ The given mapped instance is assumed to be in the :term:`persistent` or
+ :term:`detached` state. The function will remove its association with any
+ :class:`.Session` as well as its :attr:`.InstanceState.identity`. The
+ effect is that the object will behave as though it were newly constructed,
+ except retaining any attribute / collection values that were loaded at the
+ time of the call. The :attr:`.InstanceState.deleted` flag is also reset
+ if this object had been deleted as a result of using
+ :meth:`.Session.delete`.
+
+ .. warning::
+
+ :func:`.make_transient` does **not** "unexpire" or otherwise eagerly
+ load ORM-mapped attributes that are not currently loaded at the time
+ the function is called. This includes attributes which:
+
+ * were expired via :meth:`.Session.expire`
+
+ * were expired as the natural effect of committing a session
+ transaction, e.g. :meth:`.Session.commit`
+
+ * are normally :term:`lazy loaded` but are not currently loaded
+
+ * are "deferred" via :ref:`deferred` and are not yet loaded
+
+ * were not present in the query which loaded this object, such as that
+ which is common in joined table inheritance and other scenarios.
+
+ After :func:`.make_transient` is called, unloaded attributes such
+ as those above will normally resolve to the value ``None`` when
+ accessed, or an empty collection for a collection-oriented attribute.
+ As the object is transient and un-associated with any database
+ identity, it will no longer retrieve these values.
+
+ .. seealso::
+
+ :func:`.make_transient_to_detached`
+
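+ As an illustration, one hypothetical use is duplicating a persistent
+ object as a new row, assuming a ``User`` class with an
+ autoincrementing ``id`` primary key::
+
+ user = session.get(User, 5)
+
+ # the object is now transient, retaining its loaded attributes
+ make_transient(user)
+
+ # clear the primary key so that a new value will be generated
+ user.id = None
+
+ # the object is INSERTed as a new row upon flush
+ session.add(user)
+ session.commit()
+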
+ """
+ state = attributes.instance_state(instance)
+ s = _state_session(state)
+ if s:
+ s._expunge_states([state])
+
+ # remove expired state
+ state.expired_attributes.clear()
+
+ # remove deferred callables
+ if state.callables:
+ del state.callables
+
+ if state.key:
+ del state.key
+ if state._deleted:
+ del state._deleted
+
+
+def make_transient_to_detached(instance):
+ """Make the given transient instance :term:`detached`.
+
+ .. note::
+
+ :func:`.make_transient_to_detached` is a special-case function for
+ advanced use cases only.
+
+ All attribute history on the given instance
+ will be reset as though the instance were freshly loaded
+ from a query. Missing attributes will be marked as expired.
+ The primary key attributes of the object, which are required, will be made
+ into the "key" of the instance.
+
+ The object can then be added to a session, or merged
+ possibly with the load=False flag, at which point it will look
+ as if it were loaded that way, without emitting SQL.
+
+ This is a special use case function that differs from a normal
+ call to :meth:`.Session.merge` in that a given persistent state
+ can be manufactured without any SQL calls.
+
+ .. seealso::
+
+ :func:`.make_transient`
+
+ :meth:`.Session.enable_relationship_loading`
+
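+ A minimal sketch, assuming a hypothetical ``User`` class whose
+ primary key value is known to refer to an existing database row::
+
+ user = User(id=42, name="from cache")
+
+ # give the transient object an identity key; unset attributes
+ # are marked as expired
+ make_transient_to_detached(user)
+
+ # attach to a Session without emitting SQL; expired attributes
+ # will refresh from the database upon first access
+ session.add(user)
+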
+ """
+ state = attributes.instance_state(instance)
+ if state.session_id or state.key:
+ raise sa_exc.InvalidRequestError("Given object must be transient")
+ state.key = state.mapper._identity_key_from_state(state)
+ if state._deleted:
+ del state._deleted
+ state._commit_all(state.dict)
+ state._expire_attributes(state.dict, state.unloaded_expirable)
+
+
+def object_session(instance):
+ """Return the :class:`.Session` to which the given instance belongs.
+
+ This is essentially the same as the :attr:`.InstanceState.session`
+ accessor. See that attribute for details.
+
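+ E.g., where ``some_mapped_object`` is illustrative only::
+
+ from sqlalchemy.orm import object_session
+
+ session = object_session(some_mapped_object)
+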
+ """
+
+ try:
+ state = attributes.instance_state(instance)
+ except exc.NO_STATE as err:
+ util.raise_(
+ exc.UnmappedInstanceError(instance),
+ replace_context=err,
+ )
+ else:
+ return _state_session(state)
+
+
+_new_sessionid = util.counter()
diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py
new file mode 100644
index 0000000..9718024
--- /dev/null
+++ b/lib/sqlalchemy/orm/state.py
@@ -0,0 +1,1025 @@
+# orm/state.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""Defines instrumentation of instances.
+
+This module is usually not directly visible to user applications, but
+defines a large part of the ORM's interactivity.
+
+"""
+
+import weakref
+
+from . import base
+from . import exc as orm_exc
+from . import interfaces
+from .base import ATTR_WAS_SET
+from .base import INIT_OK
+from .base import NEVER_SET
+from .base import NO_VALUE
+from .base import PASSIVE_NO_INITIALIZE
+from .base import PASSIVE_NO_RESULT
+from .base import PASSIVE_OFF
+from .base import SQL_OK
+from .path_registry import PathRegistry
+from .. import exc as sa_exc
+from .. import inspection
+from .. import util
+
+
+# late-populated by session.py
+_sessions = None
+
+# optionally late-provided by sqlalchemy.ext.asyncio.session
+_async_provider = None
+
+
+@inspection._self_inspects
+class InstanceState(interfaces.InspectionAttrInfo):
+ """tracks state information at the instance level.
+
+ The :class:`.InstanceState` is a key object used by the
+ SQLAlchemy ORM in order to track the state of an object;
+ it is created the moment an object is instantiated, typically
+ as a result of :term:`instrumentation` which SQLAlchemy applies
+ to the ``__init__()`` method of the class.
+
+ :class:`.InstanceState` is also a semi-public object,
+ available for runtime inspection as to the state of a
+ mapped instance, including information such as its current
+ status within a particular :class:`.Session` and details
+ about data on individual attributes. The public API
+ in order to acquire a :class:`.InstanceState` object
+ is to use the :func:`_sa.inspect` system::
+
+ >>> from sqlalchemy import inspect
+ >>> insp = inspect(some_mapped_object)
+ >>> insp.attrs.nickname.history
+ History(added=['new nickname'], unchanged=(), deleted=['nickname'])
+
+ .. seealso::
+
+ :ref:`orm_mapper_inspection_instancestate`
+
+ """
+
+ session_id = None
+ key = None
+ runid = None
+ load_options = util.EMPTY_SET
+ load_path = PathRegistry.root
+ insert_order = None
+ _strong_obj = None
+ modified = False
+ expired = False
+ _deleted = False
+ _load_pending = False
+ _orphaned_outside_of_session = False
+ is_instance = True
+ identity_token = None
+ _last_known_values = ()
+
+ callables = ()
+ """A namespace where a per-state loader callable can be associated.
+
+ In SQLAlchemy 1.0, this is only used for lazy loaders / deferred
+ loaders that were set up via query option.
+
+ Previously, callables was used also to indicate expired attributes
+ by storing a link to the InstanceState itself in this dictionary.
+ This role is now handled by the expired_attributes set.
+
+ """
+
+ def __init__(self, obj, manager):
+ self.class_ = obj.__class__
+ self.manager = manager
+ self.obj = weakref.ref(obj, self._cleanup)
+ self.committed_state = {}
+ self.expired_attributes = set()
+
+ expired_attributes = None
+ """The set of keys which are 'expired' to be loaded by
+ the manager's deferred scalar loader, assuming no pending
+ changes.
+
+ See also the ``unmodified`` collection, which is intersected
+ against this set when a refresh operation occurs.
+
+ @util.memoized_property
+ def attrs(self):
+ """Return a namespace representing each attribute on
+ the mapped object, including its current value
+ and history.
+
+ The returned object is an instance of :class:`.AttributeState`.
+ This object allows inspection of the current data
+ within an attribute as well as attribute history
+ since the last flush.
+
+ """
+ return util.ImmutableProperties(
+ dict((key, AttributeState(self, key)) for key in self.manager)
+ )
+
+ @property
+ def transient(self):
+ """Return ``True`` if the object is :term:`transient`.
+
+ .. seealso::
+
+ :ref:`session_object_states`
+
+ """
+ return self.key is None and not self._attached
+
+ @property
+ def pending(self):
+ """Return ``True`` if the object is :term:`pending`.
+
+
+ .. seealso::
+
+ :ref:`session_object_states`
+
+ """
+ return self.key is None and self._attached
+
+ @property
+ def deleted(self):
+ """Return ``True`` if the object is :term:`deleted`.
+
+ An object that is in the deleted state is guaranteed to
+ not be within the :attr:`.Session.identity_map` of its parent
+ :class:`.Session`; however if the session's transaction is rolled
+ back, the object will be restored to the persistent state and
+ the identity map.
+
+ .. note::
+
+ The :attr:`.InstanceState.deleted` attribute refers to a specific
+ state of the object that occurs between the "persistent" and
+ "detached" states; once the object is :term:`detached`, the
+ :attr:`.InstanceState.deleted` attribute **no longer returns
+ True**; in order to detect that a state was deleted, regardless
+ of whether or not the object is associated with a
+ :class:`.Session`, use the :attr:`.InstanceState.was_deleted`
+ accessor.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_object_states`
+
+ """
+ return self.key is not None and self._attached and self._deleted
+
+ @property
+ def was_deleted(self):
+ """Return True if this object is or was previously in the
+ "deleted" state and has not been reverted to persistent.
+
+ This flag returns True once the object was deleted in flush.
+ When the object is expunged from the session either explicitly
+ or via transaction commit and enters the "detached" state,
+ this flag will continue to report True.
+
+ .. versionadded:: 1.1 - added a local method form of
+ :func:`.orm.util.was_deleted`.
+
+ .. seealso::
+
+ :attr:`.InstanceState.deleted` - refers to the "deleted" state
+
+ :func:`.orm.util.was_deleted` - standalone function
+
+ :ref:`session_object_states`
+
+ """
+ return self._deleted
+
+ @property
+ def persistent(self):
+ """Return ``True`` if the object is :term:`persistent`.
+
+ An object that is in the persistent state is guaranteed to
+ be within the :attr:`.Session.identity_map` of its parent
+ :class:`.Session`.
+
+ .. versionchanged:: 1.1 The :attr:`.InstanceState.persistent`
+ accessor no longer returns True for an object that was
+ "deleted" within a flush; use the :attr:`.InstanceState.deleted`
+ accessor to detect this state. This allows the "persistent"
+ state to guarantee membership in the identity map.
+
+ .. seealso::
+
+ :ref:`session_object_states`
+
+ """
+ return self.key is not None and self._attached and not self._deleted
+
+ @property
+ def detached(self):
+ """Return ``True`` if the object is :term:`detached`.
+
+ .. seealso::
+
+ :ref:`session_object_states`
+
+ """
+ return self.key is not None and not self._attached
+
+ @property
+ @util.preload_module("sqlalchemy.orm.session")
+ def _attached(self):
+ return (
+ self.session_id is not None
+ and self.session_id in util.preloaded.orm_session._sessions
+ )
+
+ def _track_last_known_value(self, key):
+ """Track the last known value of a particular key after expiration
+ operations.
+
+ .. versionadded:: 1.3
+
+ """
+
+ if key not in self._last_known_values:
+ self._last_known_values = dict(self._last_known_values)
+ self._last_known_values[key] = NO_VALUE
+
+ @property
+ def session(self):
+ """Return the owning :class:`.Session` for this instance,
+ or ``None`` if none available.
+
+ Note that the result here can in some cases be *different*
+ from that of ``obj in session``; an object that's been deleted
+ will report as not ``in session``; however, if the transaction is
+ still in progress, this attribute will still refer to that session.
+ Only when the transaction is completed does the object become
+ fully detached under normal circumstances.
+
+ .. seealso::
+
+ :attr:`_orm.InstanceState.async_session`
+
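+ E.g., using the :func:`_sa.inspect` API, where ``some_object`` is
+ assumed to be currently owned by a :class:`.Session`::
+
+ >>> from sqlalchemy import inspect
+ >>> inspect(some_object).session is session
+ True
+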
+ """
+ if self.session_id:
+ try:
+ return _sessions[self.session_id]
+ except KeyError:
+ pass
+ return None
+
+ @property
+ def async_session(self):
+ """Return the owning :class:`_asyncio.AsyncSession` for this instance,
+ or ``None`` if none available.
+
+ This attribute is only non-None when the :mod:`sqlalchemy.ext.asyncio`
+ API is in use for this ORM object. The returned
+ :class:`_asyncio.AsyncSession` object will be a proxy for the
+ :class:`_orm.Session` object that would be returned from the
+ :attr:`_orm.InstanceState.session` attribute for this
+ :class:`_orm.InstanceState`.
+
+ .. versionadded:: 1.4.18
+
+ .. seealso::
+
+ :ref:`asyncio_toplevel`
+
+ """
+ if _async_provider is None:
+ return None
+
+ sess = self.session
+ if sess is not None:
+ return _async_provider(sess)
+ else:
+ return None
+
+ @property
+ def object(self):
+ """Return the mapped object represented by this
+ :class:`.InstanceState`."""
+ return self.obj()
+
+ @property
+ def identity(self):
+ """Return the mapped identity of the mapped object.
+ This is the primary key identity as persisted by the ORM
+ which can always be passed directly to
+ :meth:`_query.Query.get`.
+
+ Returns ``None`` if the object has no primary key identity.
+
+ .. note::
+ An object which is :term:`transient` or :term:`pending`
+ does **not** have a mapped identity until it is flushed,
+ even if its attributes include primary key values.
+
+ """
+ if self.key is None:
+ return None
+ else:
+ return self.key[1]
+
+ @property
+ def identity_key(self):
+ """Return the identity key for the mapped object.
+
+ This is the key used to locate the object within
+ the :attr:`.Session.identity_map` mapping. It contains
+ the identity as returned by :attr:`.identity` within it.
+
+
+ """
+ # TODO: just change .key to .identity_key across
+ # the board ? probably
+ return self.key
+
+ @util.memoized_property
+ def parents(self):
+ return {}
+
+ @util.memoized_property
+ def _pending_mutations(self):
+ return {}
+
+ @util.memoized_property
+ def _empty_collections(self):
+ return {}
+
+ @util.memoized_property
+ def mapper(self):
+ """Return the :class:`_orm.Mapper` used for this mapped object."""
+ return self.manager.mapper
+
+ @property
+ def has_identity(self):
+ """Return ``True`` if this object has an identity key.
+
+ This should always have the same value as the
+ expression ``state.persistent`` or ``state.detached``.
+
+ """
+ return bool(self.key)
+
+ @classmethod
+ def _detach_states(self, states, session, to_transient=False):
+ persistent_to_detached = (
+ session.dispatch.persistent_to_detached or None
+ )
+ deleted_to_detached = session.dispatch.deleted_to_detached or None
+ pending_to_transient = session.dispatch.pending_to_transient or None
+ persistent_to_transient = (
+ session.dispatch.persistent_to_transient or None
+ )
+
+ for state in states:
+ deleted = state._deleted
+ pending = state.key is None
+ persistent = not pending and not deleted
+
+ state.session_id = None
+
+ if to_transient and state.key:
+ del state.key
+ if persistent:
+ if to_transient:
+ if persistent_to_transient is not None:
+ persistent_to_transient(session, state)
+ elif persistent_to_detached is not None:
+ persistent_to_detached(session, state)
+ elif deleted and deleted_to_detached is not None:
+ deleted_to_detached(session, state)
+ elif pending and pending_to_transient is not None:
+ pending_to_transient(session, state)
+
+ state._strong_obj = None
+
+ def _detach(self, session=None):
+ if session:
+ InstanceState._detach_states([self], session)
+ else:
+ self.session_id = self._strong_obj = None
+
+ def _dispose(self):
+ self._detach()
+ del self.obj
+
+ def _cleanup(self, ref):
+ """Weakref callback cleanup.
+
+ This callable cleans out the state when it is being garbage
+ collected.
+
+ this _cleanup **assumes** that there are no strong refs to us!
+ Will not work otherwise!
+
+ """
+
+ # Python builtins become undefined during interpreter shutdown.
+ # Guard against exceptions during this phase, as the method cannot
+ # proceed in any case if builtins have been undefined.
+ if dict is None:
+ return
+
+ instance_dict = self._instance_dict()
+ if instance_dict is not None:
+ instance_dict._fast_discard(self)
+ del self._instance_dict
+
+ # we can't possibly be in instance_dict._modified
+ # b.c. this is weakref cleanup only, that set
+ # is strong referencing!
+ # assert self not in instance_dict._modified
+
+ self.session_id = self._strong_obj = None
+ del self.obj
+
+ def obj(self):
+ return None
+
+ @property
+ def dict(self):
+ """Return the instance dict used by the object.
+
+ Under normal circumstances, this is always synonymous
+ with the ``__dict__`` attribute of the mapped object,
+ unless an alternative instrumentation system has been
+ configured.
+
+ In the case that the actual object has been garbage
+ collected, this accessor returns a blank dictionary.
+
+ """
+ o = self.obj()
+ if o is not None:
+ return base.instance_dict(o)
+ else:
+ return {}
+
+ def _initialize_instance(*mixed, **kwargs):
+ self, instance, args = mixed[0], mixed[1], mixed[2:] # noqa
+ manager = self.manager
+
+ manager.dispatch.init(self, args, kwargs)
+
+ try:
+ return manager.original_init(*mixed[1:], **kwargs)
+ except:
+ with util.safe_reraise():
+ manager.dispatch.init_failure(self, args, kwargs)
+
+ def get_history(self, key, passive):
+ return self.manager[key].impl.get_history(self, self.dict, passive)
+
+ def get_impl(self, key):
+ return self.manager[key].impl
+
+ def _get_pending_mutation(self, key):
+ if key not in self._pending_mutations:
+ self._pending_mutations[key] = PendingCollection()
+ return self._pending_mutations[key]
+
+ def __getstate__(self):
+ state_dict = {"instance": self.obj()}
+ state_dict.update(
+ (k, self.__dict__[k])
+ for k in (
+ "committed_state",
+ "_pending_mutations",
+ "modified",
+ "expired",
+ "callables",
+ "key",
+ "parents",
+ "load_options",
+ "class_",
+ "expired_attributes",
+ "info",
+ )
+ if k in self.__dict__
+ )
+ if self.load_path:
+ state_dict["load_path"] = self.load_path.serialize()
+
+ state_dict["manager"] = self.manager._serialize(self, state_dict)
+
+ return state_dict
+
+ def __setstate__(self, state_dict):
+ inst = state_dict["instance"]
+ if inst is not None:
+ self.obj = weakref.ref(inst, self._cleanup)
+ self.class_ = inst.__class__
+ else:
+ # None being possible here is generally new as of 0.7.4,
+ # due to storage of state in "parents". "class_"
+ # is also new.
+ self.obj = None
+ self.class_ = state_dict["class_"]
+
+ self.committed_state = state_dict.get("committed_state", {})
+ self._pending_mutations = state_dict.get("_pending_mutations", {})
+ self.parents = state_dict.get("parents", {})
+ self.modified = state_dict.get("modified", False)
+ self.expired = state_dict.get("expired", False)
+ if "info" in state_dict:
+ self.info.update(state_dict["info"])
+ if "callables" in state_dict:
+ self.callables = state_dict["callables"]
+
+ try:
+ self.expired_attributes = state_dict["expired_attributes"]
+ except KeyError:
+ self.expired_attributes = set()
+ # 0.9 and earlier compat
+ for k in list(self.callables):
+ if self.callables[k] is self:
+ self.expired_attributes.add(k)
+ del self.callables[k]
+ else:
+ if "expired_attributes" in state_dict:
+ self.expired_attributes = state_dict["expired_attributes"]
+ else:
+ self.expired_attributes = set()
+
+ self.__dict__.update(
+ [
+ (k, state_dict[k])
+ for k in ("key", "load_options")
+ if k in state_dict
+ ]
+ )
+ if self.key:
+ try:
+ self.identity_token = self.key[2]
+ except IndexError:
+ # 1.1 and earlier compat before identity_token
+ assert len(self.key) == 2
+ self.key = self.key + (None,)
+ self.identity_token = None
+
+ if "load_path" in state_dict:
+ self.load_path = PathRegistry.deserialize(state_dict["load_path"])
+
+ state_dict["manager"](self, inst, state_dict)
+
+ def _reset(self, dict_, key):
+ """Remove the given attribute and any
+ callables associated with it."""
+
+ old = dict_.pop(key, None)
+ if old is not None and self.manager[key].impl.collection:
+ self.manager[key].impl._invalidate_collection(old)
+ self.expired_attributes.discard(key)
+ if self.callables:
+ self.callables.pop(key, None)
+
+ def _copy_callables(self, from_):
+ if "callables" in from_.__dict__:
+ self.callables = dict(from_.callables)
+
+ @classmethod
+ def _instance_level_callable_processor(cls, manager, fn, key):
+ impl = manager[key].impl
+ if impl.collection:
+
+ def _set_callable(state, dict_, row):
+ if "callables" not in state.__dict__:
+ state.callables = {}
+ old = dict_.pop(key, None)
+ if old is not None:
+ impl._invalidate_collection(old)
+ state.callables[key] = fn
+
+ else:
+
+ def _set_callable(state, dict_, row):
+ if "callables" not in state.__dict__:
+ state.callables = {}
+ state.callables[key] = fn
+
+ return _set_callable
+
+ def _expire(self, dict_, modified_set):
+ self.expired = True
+ if self.modified:
+ modified_set.discard(self)
+ self.committed_state.clear()
+ self.modified = False
+
+ self._strong_obj = None
+
+ if "_pending_mutations" in self.__dict__:
+ del self.__dict__["_pending_mutations"]
+
+ if "parents" in self.__dict__:
+ del self.__dict__["parents"]
+
+ self.expired_attributes.update(
+ [impl.key for impl in self.manager._loader_impls]
+ )
+
+ if self.callables:
+ # the per state loader callables we can remove here are
+ # LoadDeferredColumns, which undefers a column at the instance
+ # level that is mapped with deferred, and LoadLazyAttribute,
+ # which lazy loads a relationship at the instance level that
+ # is mapped with "noload" or perhaps "immediateload".
+ # Before 1.4, only column-based
+ # attributes could be considered to be "expired", so here they
+ # were the only ones "unexpired", which means to make them deferred
+            # again. As of 1.4 we also apply the same
+            # treatment to relationships, that is, an instance level lazy
+            # loader is reset in the same way as a column loader.
+ for k in self.expired_attributes.intersection(self.callables):
+ del self.callables[k]
+
+ for k in self.manager._collection_impl_keys.intersection(dict_):
+ collection = dict_.pop(k)
+ collection._sa_adapter.invalidated = True
+
+ if self._last_known_values:
+ self._last_known_values.update(
+ (k, dict_[k]) for k in self._last_known_values if k in dict_
+ )
+
+ for key in self.manager._all_key_set.intersection(dict_):
+ del dict_[key]
+
+ self.manager.dispatch.expire(self, None)
+
+ def _expire_attributes(self, dict_, attribute_names, no_loader=False):
+ pending = self.__dict__.get("_pending_mutations", None)
+
+ callables = self.callables
+
+ for key in attribute_names:
+ impl = self.manager[key].impl
+ if impl.accepts_scalar_loader:
+ if no_loader and (impl.callable_ or key in callables):
+ continue
+
+ self.expired_attributes.add(key)
+ if callables and key in callables:
+ del callables[key]
+ old = dict_.pop(key, NO_VALUE)
+ if impl.collection and old is not NO_VALUE:
+ impl._invalidate_collection(old)
+
+ if (
+ self._last_known_values
+ and key in self._last_known_values
+ and old is not NO_VALUE
+ ):
+ self._last_known_values[key] = old
+
+ self.committed_state.pop(key, None)
+ if pending:
+ pending.pop(key, None)
+
+ self.manager.dispatch.expire(self, attribute_names)
+
+ def _load_expired(self, state, passive):
+ """__call__ allows the InstanceState to act as a deferred
+ callable for loading expired attributes, which is also
+ serializable (picklable).
+
+ """
+
+ if not passive & SQL_OK:
+ return PASSIVE_NO_RESULT
+
+ toload = self.expired_attributes.intersection(self.unmodified)
+ toload = toload.difference(
+ attr
+ for attr in toload
+ if not self.manager[attr].impl.load_on_unexpire
+ )
+
+ self.manager.expired_attribute_loader(self, toload, passive)
+
+ # if the loader failed, or this
+ # instance state didn't have an identity,
+ # the attributes still might be in the callables
+ # dict. ensure they are removed.
+ self.expired_attributes.clear()
+
+ return ATTR_WAS_SET
+
+ @property
+ def unmodified(self):
+ """Return the set of keys which have no uncommitted changes"""
+
+ return set(self.manager).difference(self.committed_state)
+
+ def unmodified_intersection(self, keys):
+ """Return self.unmodified.intersection(keys)."""
+
+ return (
+ set(keys)
+ .intersection(self.manager)
+ .difference(self.committed_state)
+ )
+
+ @property
+ def unloaded(self):
+ """Return the set of keys which do not have a loaded value.
+
+ This includes expired attributes and any other attribute that
+ was never populated or modified.
+
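+        E.g., an inspection sketch (``some_mapped_object`` is
+        hypothetical)::
+
+            from sqlalchemy import inspect
+
+            insp = inspect(some_mapped_object)
+            insp.unloaded  # set of attribute keys with no loaded value
+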
+ """
+ return (
+ set(self.manager)
+ .difference(self.committed_state)
+ .difference(self.dict)
+ )
+
+ @property
+ def unloaded_expirable(self):
+ """Return the set of keys which do not have a loaded value.
+
+ This includes expired attributes and any other attribute that
+ was never populated or modified.
+
+ """
+ return self.unloaded
+
+ @property
+ def _unloaded_non_object(self):
+ return self.unloaded.intersection(
+ attr
+ for attr in self.manager
+ if self.manager[attr].impl.accepts_scalar_loader
+ )
+
+ def _instance_dict(self):
+ return None
+
+ def _modified_event(
+ self, dict_, attr, previous, collection=False, is_userland=False
+ ):
+ if attr:
+ if not attr.send_modified_events:
+ return
+ if is_userland and attr.key not in dict_:
+ raise sa_exc.InvalidRequestError(
+ "Can't flag attribute '%s' modified; it's not present in "
+ "the object state" % attr.key
+ )
+ if attr.key not in self.committed_state or is_userland:
+ if collection:
+ if previous is NEVER_SET:
+ if attr.key in dict_:
+ previous = dict_[attr.key]
+
+ if previous not in (None, NO_VALUE, NEVER_SET):
+ previous = attr.copy(previous)
+ self.committed_state[attr.key] = previous
+
+ if attr.key in self._last_known_values:
+ self._last_known_values[attr.key] = NO_VALUE
+
+ # assert self._strong_obj is None or self.modified
+
+ if (self.session_id and self._strong_obj is None) or not self.modified:
+ self.modified = True
+ instance_dict = self._instance_dict()
+ if instance_dict:
+ has_modified = bool(instance_dict._modified)
+ instance_dict._modified.add(self)
+ else:
+ has_modified = False
+
+ # only create _strong_obj link if attached
+ # to a session
+
+ inst = self.obj()
+ if self.session_id:
+ self._strong_obj = inst
+
+ # if identity map already had modified objects,
+ # assume autobegin already occurred, else check
+ # for autobegin
+ if not has_modified:
+ # inline of autobegin, to ensure session transaction
+ # snapshot is established
+ try:
+ session = _sessions[self.session_id]
+ except KeyError:
+ pass
+ else:
+ if session._transaction is None:
+ session._autobegin()
+
+ if inst is None and attr:
+ raise orm_exc.ObjectDereferencedError(
+ "Can't emit change event for attribute '%s' - "
+ "parent object of type %s has been garbage "
+ "collected."
+ % (self.manager[attr.key], base.state_class_str(self))
+ )
+
+ def _commit(self, dict_, keys):
+ """Commit attributes.
+
+ This is used by a partial-attribute load operation to mark committed
+ those attributes which were refreshed from the database.
+
+ Attributes marked as "expired" can potentially remain "expired" after
+ this step if a value was not populated in state.dict.
+
+ """
+ for key in keys:
+ self.committed_state.pop(key, None)
+
+ self.expired = False
+
+ self.expired_attributes.difference_update(
+ set(keys).intersection(dict_)
+ )
+
+ # the per-keys commit removes object-level callables,
+        # while commit_all does not. it's not clear
+        # whether this behavior has a rationale, however tests do
+        # ensure this is what it does.
+ if self.callables:
+ for key in (
+ set(self.callables).intersection(keys).intersection(dict_)
+ ):
+ del self.callables[key]
+
+ def _commit_all(self, dict_, instance_dict=None):
+ """commit all attributes unconditionally.
+
+ This is used after a flush() or a full load/refresh
+ to remove all pending state from the instance.
+
+ - all attributes are marked as "committed"
+ - the "strong dirty reference" is removed
+ - the "modified" flag is set to False
+ - any "expired" markers for scalar attributes loaded are removed.
+ - lazy load callables for objects / collections *stay*
+
+ Attributes marked as "expired" can potentially remain
+ "expired" after this step if a value was not populated in state.dict.
+
+ """
+ self._commit_all_states([(self, dict_)], instance_dict)
+
+ @classmethod
+    def _commit_all_states(cls, iter_, instance_dict=None):
+ """Mass / highly inlined version of commit_all()."""
+
+ for state, dict_ in iter_:
+ state_dict = state.__dict__
+
+ state.committed_state.clear()
+
+ if "_pending_mutations" in state_dict:
+ del state_dict["_pending_mutations"]
+
+ state.expired_attributes.difference_update(dict_)
+
+ if instance_dict and state.modified:
+ instance_dict._modified.discard(state)
+
+ state.modified = state.expired = False
+ state._strong_obj = None
+
+
+class AttributeState(object):
+ """Provide an inspection interface corresponding
+ to a particular attribute on a particular mapped object.
+
+ The :class:`.AttributeState` object is accessed
+ via the :attr:`.InstanceState.attrs` collection
+ of a particular :class:`.InstanceState`::
+
+ from sqlalchemy import inspect
+
+ insp = inspect(some_mapped_object)
+ attr_state = insp.attrs.some_attribute
+
+ """
+
+ def __init__(self, state, key):
+ self.state = state
+ self.key = key
+
+ @property
+ def loaded_value(self):
+ """The current value of this attribute as loaded from the database.
+
+ If the value has not been loaded, or is otherwise not present
+ in the object's dictionary, returns NO_VALUE.
+
+ """
+ return self.state.dict.get(self.key, NO_VALUE)
+
+ @property
+ def value(self):
+ """Return the value of this attribute.
+
+ This operation is equivalent to accessing the object's
+ attribute directly or via ``getattr()``, and will fire
+ off any pending loader callables if needed.
+
+ """
+ return self.state.manager[self.key].__get__(
+ self.state.obj(), self.state.class_
+ )
+
+ @property
+ def history(self):
+ """Return the current **pre-flush** change history for
+ this attribute, via the :class:`.History` interface.
+
+ This method will **not** emit loader callables if the value of the
+ attribute is unloaded.
+
+ .. note::
+
+ The attribute history system tracks changes on a **per flush
+ basis**. Each time the :class:`.Session` is flushed, the history
+ of each attribute is reset to empty. The :class:`.Session` by
+ default autoflushes each time a :class:`_query.Query` is invoked.
+            For options on how to control this, see :ref:`session_flushing`.
+
+ .. seealso::
+
+ :meth:`.AttributeState.load_history` - retrieve history
+ using loader callables if the value is not locally present.
+
+ :func:`.attributes.get_history` - underlying function
+
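+        E.g., a sketch of inspecting pre-flush history (object and
+        attribute names here are hypothetical)::
+
+            from sqlalchemy import inspect
+
+            insp = inspect(some_mapped_object)
+            hist = insp.attrs.some_attribute.history
+            hist.added, hist.unchanged, hist.deleted
+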
+ """
+ return self.state.get_history(self.key, PASSIVE_NO_INITIALIZE)
+
+ def load_history(self):
+ """Return the current **pre-flush** change history for
+ this attribute, via the :class:`.History` interface.
+
+ This method **will** emit loader callables if the value of the
+ attribute is unloaded.
+
+ .. note::
+
+ The attribute history system tracks changes on a **per flush
+ basis**. Each time the :class:`.Session` is flushed, the history
+ of each attribute is reset to empty. The :class:`.Session` by
+ default autoflushes each time a :class:`_query.Query` is invoked.
+            For options on how to control this, see :ref:`session_flushing`.
+
+ .. seealso::
+
+ :attr:`.AttributeState.history`
+
+ :func:`.attributes.get_history` - underlying function
+
+ .. versionadded:: 0.9.0
+
+ """
+ return self.state.get_history(self.key, PASSIVE_OFF ^ INIT_OK)
+
+
+class PendingCollection(object):
+ """A writable placeholder for an unloaded collection.
+
+ Stores items appended to and removed from a collection that has not yet
+ been loaded. When the collection is loaded, the changes stored in
+ PendingCollection are applied to it to produce the final result.
+
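+    E.g., a minimal behavioral sketch (the items are hypothetical)::
+
+        pc = PendingCollection()
+        pc.append(item_a)  # staged to be added when the collection loads
+        pc.remove(item_b)  # staged to be removed when the collection loads
+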
+ """
+
+ def __init__(self):
+ self.deleted_items = util.IdentitySet()
+ self.added_items = util.OrderedIdentitySet()
+
+ def append(self, value):
+ if value in self.deleted_items:
+ self.deleted_items.remove(value)
+ else:
+ self.added_items.add(value)
+
+ def remove(self, value):
+ if value in self.added_items:
+ self.added_items.remove(value)
+ else:
+ self.deleted_items.add(value)
diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py
new file mode 100644
index 0000000..71aae00
--- /dev/null
+++ b/lib/sqlalchemy/orm/strategies.py
@@ -0,0 +1,3141 @@
+# orm/strategies.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""sqlalchemy.orm.interfaces.LoaderStrategy
+ implementations, and related MapperOptions."""
+from __future__ import absolute_import
+
+import collections
+import itertools
+
+from . import attributes
+from . import exc as orm_exc
+from . import interfaces
+from . import loading
+from . import path_registry
+from . import properties
+from . import query
+from . import relationships
+from . import unitofwork
+from . import util as orm_util
+from .base import _DEFER_FOR_STATE
+from .base import _RAISE_FOR_STATE
+from .base import _SET_DEFERRED_EXPIRED
+from .context import _column_descriptions
+from .context import ORMCompileState
+from .context import ORMSelectCompileState
+from .context import QueryContext
+from .interfaces import LoaderStrategy
+from .interfaces import StrategizedProperty
+from .session import _state_session
+from .state import InstanceState
+from .util import _none_set
+from .util import aliased
+from .. import event
+from .. import exc as sa_exc
+from .. import inspect
+from .. import log
+from .. import sql
+from .. import util
+from ..sql import util as sql_util
+from ..sql import visitors
+from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
+from ..sql.selectable import Select
+
+
+def _register_attribute(
+ prop,
+ mapper,
+ useobject,
+ compare_function=None,
+ typecallable=None,
+ callable_=None,
+ proxy_property=None,
+ active_history=False,
+ impl_class=None,
+ **kw
+):
+
+ listen_hooks = []
+
+ uselist = useobject and prop.uselist
+
+ if useobject and prop.single_parent:
+ listen_hooks.append(single_parent_validator)
+
+ if prop.key in prop.parent.validators:
+ fn, opts = prop.parent.validators[prop.key]
+ listen_hooks.append(
+ lambda desc, prop: orm_util._validator_events(
+ desc, prop.key, fn, **opts
+ )
+ )
+
+ if useobject:
+ listen_hooks.append(unitofwork.track_cascade_events)
+
+ # need to assemble backref listeners
+ # after the singleparentvalidator, mapper validator
+ if useobject:
+ backref = prop.back_populates
+ if backref and prop._effective_sync_backref:
+ listen_hooks.append(
+ lambda desc, prop: attributes.backref_listeners(
+ desc, backref, uselist
+ )
+ )
+
+ # a single MapperProperty is shared down a class inheritance
+ # hierarchy, so we set up attribute instrumentation and backref event
+ # for each mapper down the hierarchy.
+
+ # typically, "mapper" is the same as prop.parent, due to the way
+ # the configure_mappers() process runs, however this is not strongly
+ # enforced, and in the case of a second configure_mappers() run the
+ # mapper here might not be prop.parent; also, a subclass mapper may
+ # be called here before a superclass mapper. That is, can't depend
+ # on mappers not already being set up so we have to check each one.
+
+ for m in mapper.self_and_descendants:
+ if prop is m._props.get(
+ prop.key
+ ) and not m.class_manager._attr_has_impl(prop.key):
+
+ desc = attributes.register_attribute_impl(
+ m.class_,
+ prop.key,
+ parent_token=prop,
+ uselist=uselist,
+ compare_function=compare_function,
+ useobject=useobject,
+ trackparent=useobject
+ and (
+ prop.single_parent
+ or prop.direction is interfaces.ONETOMANY
+ ),
+ typecallable=typecallable,
+ callable_=callable_,
+ active_history=active_history,
+ impl_class=impl_class,
+ send_modified_events=not useobject or not prop.viewonly,
+ doc=prop.doc,
+ **kw
+ )
+
+ for hook in listen_hooks:
+ hook(desc, prop)
+
+
+@properties.ColumnProperty.strategy_for(instrument=False, deferred=False)
+class UninstrumentedColumnLoader(LoaderStrategy):
+ """Represent a non-instrumented MapperProperty.
+
+ The polymorphic_on argument of mapper() often results in this,
+ if the argument is against the with_polymorphic selectable.
+
+ """
+
+ __slots__ = ("columns",)
+
+ def __init__(self, parent, strategy_key):
+ super(UninstrumentedColumnLoader, self).__init__(parent, strategy_key)
+ self.columns = self.parent_property.columns
+
+ def setup_query(
+ self,
+ compile_state,
+ query_entity,
+ path,
+ loadopt,
+ adapter,
+ column_collection=None,
+ **kwargs
+ ):
+ for c in self.columns:
+ if adapter:
+ c = adapter.columns[c]
+ compile_state._append_dedupe_col_collection(c, column_collection)
+
+ def create_row_processor(
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ ):
+ pass
+
+
+@log.class_logger
+@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
+class ColumnLoader(LoaderStrategy):
+ """Provide loading behavior for a :class:`.ColumnProperty`."""
+
+ __slots__ = "columns", "is_composite"
+
+ def __init__(self, parent, strategy_key):
+ super(ColumnLoader, self).__init__(parent, strategy_key)
+ self.columns = self.parent_property.columns
+ self.is_composite = hasattr(self.parent_property, "composite_class")
+
+ def setup_query(
+ self,
+ compile_state,
+ query_entity,
+ path,
+ loadopt,
+ adapter,
+ column_collection,
+ memoized_populators,
+ check_for_adapt=False,
+ **kwargs
+ ):
+ for c in self.columns:
+ if adapter:
+ if check_for_adapt:
+ c = adapter.adapt_check_present(c)
+ if c is None:
+ return
+ else:
+ c = adapter.columns[c]
+
+ compile_state._append_dedupe_col_collection(c, column_collection)
+
+ fetch = self.columns[0]
+ if adapter:
+ fetch = adapter.columns[fetch]
+ memoized_populators[self.parent_property] = fetch
+
+ def init_class_attribute(self, mapper):
+ self.is_class_level = True
+ coltype = self.columns[0].type
+ # TODO: check all columns ? check for foreign key as well?
+ active_history = (
+ self.parent_property.active_history
+ or self.columns[0].primary_key
+ or (
+ mapper.version_id_col is not None
+ and mapper._columntoproperty.get(mapper.version_id_col, None)
+ is self.parent_property
+ )
+ )
+
+ _register_attribute(
+ self.parent_property,
+ mapper,
+ useobject=False,
+ compare_function=coltype.compare_values,
+ active_history=active_history,
+ )
+
+ def create_row_processor(
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ ):
+ # look through list of columns represented here
+ # to see which, if any, is present in the row.
+ for col in self.columns:
+ if adapter:
+ col = adapter.columns[col]
+ getter = result._getter(col, False)
+ if getter:
+ populators["quick"].append((self.key, getter))
+ break
+ else:
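+            # for/else: no break above means none of the columns were
+            # present in the row, so the attribute is marked expired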
+ populators["expire"].append((self.key, True))
+
+
+@log.class_logger
+@properties.ColumnProperty.strategy_for(query_expression=True)
+class ExpressionColumnLoader(ColumnLoader):
+ def __init__(self, parent, strategy_key):
+ super(ExpressionColumnLoader, self).__init__(parent, strategy_key)
+
+ # compare to the "default" expression that is mapped in
+ # the column. If it's sql.null, we don't need to render
+ # unless an expr is passed in the options.
+ null = sql.null().label(None)
+ self._have_default_expression = any(
+ not c.compare(null) for c in self.parent_property.columns
+ )
+
+ def setup_query(
+ self,
+ compile_state,
+ query_entity,
+ path,
+ loadopt,
+ adapter,
+ column_collection,
+ memoized_populators,
+ **kwargs
+ ):
+ columns = None
+ if loadopt and "expression" in loadopt.local_opts:
+ columns = [loadopt.local_opts["expression"]]
+ elif self._have_default_expression:
+ columns = self.parent_property.columns
+
+ if columns is None:
+ return
+
+ for c in columns:
+ if adapter:
+ c = adapter.columns[c]
+ compile_state._append_dedupe_col_collection(c, column_collection)
+
+ fetch = columns[0]
+ if adapter:
+ fetch = adapter.columns[fetch]
+ memoized_populators[self.parent_property] = fetch
+
+ def create_row_processor(
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ ):
+ # look through list of columns represented here
+ # to see which, if any, is present in the row.
+ if loadopt and "expression" in loadopt.local_opts:
+ columns = [loadopt.local_opts["expression"]]
+
+ for col in columns:
+ if adapter:
+ col = adapter.columns[col]
+ getter = result._getter(col, False)
+ if getter:
+ populators["quick"].append((self.key, getter))
+ break
+ else:
+ populators["expire"].append((self.key, True))
+
+ def init_class_attribute(self, mapper):
+ self.is_class_level = True
+
+ _register_attribute(
+ self.parent_property,
+ mapper,
+ useobject=False,
+ compare_function=self.columns[0].type.compare_values,
+ accepts_scalar_loader=False,
+ )
+
+
+@log.class_logger
+@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
+@properties.ColumnProperty.strategy_for(
+ deferred=True, instrument=True, raiseload=True
+)
+@properties.ColumnProperty.strategy_for(do_nothing=True)
+class DeferredColumnLoader(LoaderStrategy):
+ """Provide loading behavior for a deferred :class:`.ColumnProperty`."""
+
+ __slots__ = "columns", "group", "raiseload"
+
+ def __init__(self, parent, strategy_key):
+ super(DeferredColumnLoader, self).__init__(parent, strategy_key)
+ if hasattr(self.parent_property, "composite_class"):
+ raise NotImplementedError(
+ "Deferred loading for composite " "types not implemented yet"
+ )
+ self.raiseload = self.strategy_opts.get("raiseload", False)
+ self.columns = self.parent_property.columns
+ self.group = self.parent_property.group
+
+ def create_row_processor(
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ ):
+
+ # for a DeferredColumnLoader, this method is only used during a
+ # "row processor only" query; see test_deferred.py ->
+ # tests with "rowproc_only" in their name. As of the 1.0 series,
+ # loading._instance_processor doesn't use a "row processing" function
+ # to populate columns, instead it uses data in the "populators"
+ # dictionary. Normally, the DeferredColumnLoader.setup_query()
+ # sets up that data in the "memoized_populators" dictionary
+ # and "create_row_processor()" here is never invoked.
+
+ if (
+ context.refresh_state
+ and context.query._compile_options._only_load_props
+ and self.key in context.query._compile_options._only_load_props
+ ):
+ self.parent_property._get_strategy(
+ (("deferred", False), ("instrument", True))
+ ).create_row_processor(
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ )
+
+ elif not self.is_class_level:
+ if self.raiseload:
+ set_deferred_for_local_state = (
+ self.parent_property._raise_column_loader
+ )
+ else:
+ set_deferred_for_local_state = (
+ self.parent_property._deferred_column_loader
+ )
+ populators["new"].append((self.key, set_deferred_for_local_state))
+ else:
+ populators["expire"].append((self.key, False))
+
+ def init_class_attribute(self, mapper):
+ self.is_class_level = True
+
+ _register_attribute(
+ self.parent_property,
+ mapper,
+ useobject=False,
+ compare_function=self.columns[0].type.compare_values,
+ callable_=self._load_for_state,
+ load_on_unexpire=False,
+ )
+
+ def setup_query(
+ self,
+ compile_state,
+ query_entity,
+ path,
+ loadopt,
+ adapter,
+ column_collection,
+ memoized_populators,
+ only_load_props=None,
+ **kw
+ ):
+
+ if (
+ (
+ compile_state.compile_options._render_for_subquery
+ and self.parent_property._renders_in_subqueries
+ )
+ or (
+ loadopt
+ and "undefer_pks" in loadopt.local_opts
+ and set(self.columns).intersection(
+ self.parent._should_undefer_in_wildcard
+ )
+ )
+ or (
+ loadopt
+ and self.group
+ and loadopt.local_opts.get(
+ "undefer_group_%s" % self.group, False
+ )
+ )
+ or (only_load_props and self.key in only_load_props)
+ ):
+ self.parent_property._get_strategy(
+ (("deferred", False), ("instrument", True))
+ ).setup_query(
+ compile_state,
+ query_entity,
+ path,
+ loadopt,
+ adapter,
+ column_collection,
+ memoized_populators,
+ **kw
+ )
+ elif self.is_class_level:
+ memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED
+ elif not self.raiseload:
+ memoized_populators[self.parent_property] = _DEFER_FOR_STATE
+ else:
+ memoized_populators[self.parent_property] = _RAISE_FOR_STATE
+
+ def _load_for_state(self, state, passive):
+ if not state.key:
+ return attributes.ATTR_EMPTY
+
+ if not passive & attributes.SQL_OK:
+ return attributes.PASSIVE_NO_RESULT
+
+ localparent = state.manager.mapper
+
+ if self.group:
+ toload = [
+ p.key
+ for p in localparent.iterate_properties
+ if isinstance(p, StrategizedProperty)
+ and isinstance(p.strategy, DeferredColumnLoader)
+ and p.group == self.group
+ ]
+ else:
+ toload = [self.key]
+
+ # narrow the keys down to just those which have no history
+ group = [k for k in toload if k in state.unmodified]
+
+ session = _state_session(state)
+ if session is None:
+ raise orm_exc.DetachedInstanceError(
+ "Parent instance %s is not bound to a Session; "
+ "deferred load operation of attribute '%s' cannot proceed"
+ % (orm_util.state_str(state), self.key)
+ )
+
+ if self.raiseload:
+ self._invoke_raise_load(state, passive, "raise")
+
+ if (
+ loading.load_on_ident(
+ session,
+ sql.select(localparent).set_label_style(
+ LABEL_STYLE_TABLENAME_PLUS_COL
+ ),
+ state.key,
+ only_load_props=group,
+ refresh_state=state,
+ )
+ is None
+ ):
+ raise orm_exc.ObjectDeletedError(state)
+
+ return attributes.ATTR_WAS_SET
+
+ def _invoke_raise_load(self, state, passive, lazy):
+ raise sa_exc.InvalidRequestError(
+ "'%s' is not available due to raiseload=True" % (self,)
+ )
+
+
+class LoadDeferredColumns(object):
+ """serializable loader object used by DeferredColumnLoader"""
+
+ def __init__(self, key, raiseload=False):
+ self.key = key
+ self.raiseload = raiseload
+
+ def __call__(self, state, passive=attributes.PASSIVE_OFF):
+ key = self.key
+
+ localparent = state.manager.mapper
+ prop = localparent._props[key]
+ if self.raiseload:
+ strategy_key = (
+ ("deferred", True),
+ ("instrument", True),
+ ("raiseload", True),
+ )
+ else:
+ strategy_key = (("deferred", True), ("instrument", True))
+ strategy = prop._get_strategy(strategy_key)
+ return strategy._load_for_state(state, passive)
+
+
+class AbstractRelationshipLoader(LoaderStrategy):
+ """LoaderStratgies which deal with related objects."""
+
+ __slots__ = "mapper", "target", "uselist", "entity"
+
+ def __init__(self, parent, strategy_key):
+ super(AbstractRelationshipLoader, self).__init__(parent, strategy_key)
+ self.mapper = self.parent_property.mapper
+ self.entity = self.parent_property.entity
+ self.target = self.parent_property.target
+ self.uselist = self.parent_property.uselist
+
+
+@log.class_logger
+@relationships.RelationshipProperty.strategy_for(do_nothing=True)
+class DoNothingLoader(LoaderStrategy):
+ """Relationship loader that makes no change to the object's state.
+
+ Compared to NoLoader, this loader does not initialize the
+ collection/attribute to empty/none; the usual default LazyLoader will
+ take effect.
+
+ """
+
+
+@log.class_logger
+@relationships.RelationshipProperty.strategy_for(lazy="noload")
+@relationships.RelationshipProperty.strategy_for(lazy=None)
+class NoLoader(AbstractRelationshipLoader):
+ """Provide loading behavior for a :class:`.RelationshipProperty`
+ with "lazy=None".
+
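+    E.g., a mapping sketch (hypothetical models) that selects this
+    strategy::
+
+        class Parent(Base):
+            __tablename__ = "parent"
+            id = Column(Integer, primary_key=True)
+            children = relationship("Child", lazy="noload")
+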
+ """
+
+ __slots__ = ()
+
+ def init_class_attribute(self, mapper):
+ self.is_class_level = True
+
+ _register_attribute(
+ self.parent_property,
+ mapper,
+ useobject=True,
+ typecallable=self.parent_property.collection_class,
+ )
+
+ def create_row_processor(
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ ):
+ def invoke_no_load(state, dict_, row):
+ if self.uselist:
+ attributes.init_state_collection(state, dict_, self.key)
+ else:
+ dict_[self.key] = None
+
+ populators["new"].append((self.key, invoke_no_load))
+
+
+@log.class_logger
+@relationships.RelationshipProperty.strategy_for(lazy=True)
+@relationships.RelationshipProperty.strategy_for(lazy="select")
+@relationships.RelationshipProperty.strategy_for(lazy="raise")
+@relationships.RelationshipProperty.strategy_for(lazy="raise_on_sql")
+@relationships.RelationshipProperty.strategy_for(lazy="baked_select")
+class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots):
+ """Provide loading behavior for a :class:`.RelationshipProperty`
+ with "lazy=True", that is loads when first accessed.
+
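+    E.g., a mapping sketch (hypothetical models) that selects this
+    strategy; ``lazy="select"`` is also the default::
+
+        class Parent(Base):
+            __tablename__ = "parent"
+            id = Column(Integer, primary_key=True)
+            children = relationship("Child", lazy="select")
+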
+ """
+
+ __slots__ = (
+ "_lazywhere",
+ "_rev_lazywhere",
+ "_lazyload_reverse_option",
+ "_order_by",
+ "use_get",
+ "is_aliased_class",
+ "_bind_to_col",
+ "_equated_columns",
+ "_rev_bind_to_col",
+ "_rev_equated_columns",
+ "_simple_lazy_clause",
+ "_raise_always",
+ "_raise_on_sql",
+ )
+
+ def __init__(self, parent, strategy_key):
+ super(LazyLoader, self).__init__(parent, strategy_key)
+ self._raise_always = self.strategy_opts["lazy"] == "raise"
+ self._raise_on_sql = self.strategy_opts["lazy"] == "raise_on_sql"
+
+ self.is_aliased_class = inspect(self.entity).is_aliased_class
+
+ join_condition = self.parent_property._join_condition
+ (
+ self._lazywhere,
+ self._bind_to_col,
+ self._equated_columns,
+ ) = join_condition.create_lazy_clause()
+
+ (
+ self._rev_lazywhere,
+ self._rev_bind_to_col,
+ self._rev_equated_columns,
+ ) = join_condition.create_lazy_clause(reverse_direction=True)
+
+ if self.parent_property.order_by:
+ self._order_by = [
+ sql_util._deep_annotate(elem, {"_orm_adapt": True})
+ for elem in util.to_list(self.parent_property.order_by)
+ ]
+ else:
+ self._order_by = None
+
+ self.logger.info("%s lazy loading clause %s", self, self._lazywhere)
+
+ # determine if our "lazywhere" clause is the same as the mapper's
+ # get() clause. then we can just use mapper.get()
+ #
+ # TODO: the "not self.uselist" can be taken out entirely; a m2o
+ # load that populates for a list (very unusual, but is possible with
+ # the API) can still set for "None" and the attribute system will
+ # populate as an empty list.
+ self.use_get = (
+ not self.is_aliased_class
+ and not self.uselist
+ and self.entity._get_clause[0].compare(
+ self._lazywhere,
+ use_proxies=True,
+ compare_keys=False,
+ equivalents=self.mapper._equivalent_columns,
+ )
+ )
+
+ if self.use_get:
+ for col in list(self._equated_columns):
+ if col in self.mapper._equivalent_columns:
+ for c in self.mapper._equivalent_columns[col]:
+ self._equated_columns[c] = self._equated_columns[col]
+
+ self.logger.info(
+ "%s will use Session.get() to " "optimize instance loads", self
+ )
+
+ def init_class_attribute(self, mapper):
+ self.is_class_level = True
+
+ _legacy_inactive_history_style = (
+ self.parent_property._legacy_inactive_history_style
+ )
+
+ if self.parent_property.active_history:
+ active_history = True
+ _deferred_history = False
+
+ elif (
+ self.parent_property.direction is not interfaces.MANYTOONE
+ or not self.use_get
+ ):
+ if _legacy_inactive_history_style:
+ active_history = True
+ _deferred_history = False
+ else:
+ active_history = False
+ _deferred_history = True
+ else:
+ active_history = _deferred_history = False
+
+ _register_attribute(
+ self.parent_property,
+ mapper,
+ useobject=True,
+ callable_=self._load_for_state,
+ typecallable=self.parent_property.collection_class,
+ active_history=active_history,
+ _deferred_history=_deferred_history,
+ )
+
+ def _memoized_attr__simple_lazy_clause(self):
+
+ lazywhere = sql_util._deep_annotate(
+ self._lazywhere, {"_orm_adapt": True}
+ )
+
+ criterion, bind_to_col = (lazywhere, self._bind_to_col)
+
+ params = []
+
+ def visit_bindparam(bindparam):
+ bindparam.unique = False
+
+ visitors.traverse(criterion, {}, {"bindparam": visit_bindparam})
+
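+        # the visitor is redefined here; the first, in-place traversal
+        # above de-uniquified the bind parameter names, while this second,
+        # cloning traversal collects each parameter along with the column
+        # it binds to (or its literal value) for later population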
+ def visit_bindparam(bindparam):
+ if bindparam._identifying_key in bind_to_col:
+ params.append(
+ (
+ bindparam.key,
+ bind_to_col[bindparam._identifying_key],
+ None,
+ )
+ )
+ elif bindparam.callable is None:
+ params.append((bindparam.key, None, bindparam.value))
+
+ criterion = visitors.cloned_traverse(
+ criterion, {}, {"bindparam": visit_bindparam}
+ )
+
+ return criterion, params
+
+ def _generate_lazy_clause(self, state, passive):
+ criterion, param_keys = self._simple_lazy_clause
+
+ if state is None:
+ return sql_util.adapt_criterion_to_null(
+ criterion, [key for key, ident, value in param_keys]
+ )
+
+ mapper = self.parent_property.parent
+
+ o = state.obj() # strong ref
+ dict_ = attributes.instance_dict(o)
+
+ if passive & attributes.INIT_OK:
+ passive ^= attributes.INIT_OK
+
+ params = {}
+ for key, ident, value in param_keys:
+ if ident is not None:
+ if passive and passive & attributes.LOAD_AGAINST_COMMITTED:
+ value = mapper._get_committed_state_attr_by_column(
+ state, dict_, ident, passive
+ )
+ else:
+ value = mapper._get_state_attr_by_column(
+ state, dict_, ident, passive
+ )
+
+ params[key] = value
+
+ return criterion, params
+
+ def _invoke_raise_load(self, state, passive, lazy):
+ raise sa_exc.InvalidRequestError(
+ "'%s' is not available due to lazy='%s'" % (self, lazy)
+ )
+
+ def _load_for_state(self, state, passive, loadopt=None, extra_criteria=()):
+ if not state.key and (
+ (
+ not self.parent_property.load_on_pending
+ and not state._load_pending
+ )
+ or not state.session_id
+ ):
+ return attributes.ATTR_EMPTY
+
+ pending = not state.key
+ primary_key_identity = None
+
+ use_get = self.use_get and (not loadopt or not loadopt._extra_criteria)
+
+ if (not passive & attributes.SQL_OK and not use_get) or (
+ not passive & attributes.NON_PERSISTENT_OK and pending
+ ):
+ return attributes.PASSIVE_NO_RESULT
+
+ if (
+ # we were given lazy="raise"
+ self._raise_always
+ # the no_raise history-related flag was not passed
+ and not passive & attributes.NO_RAISE
+ and (
+ # if we are use_get and related_object_ok is disabled,
+ # which means we are at most looking in the identity map
+ # for history purposes or otherwise returning
+ # PASSIVE_NO_RESULT, don't raise. This is also a
+ # history-related flag
+ not use_get
+ or passive & attributes.RELATED_OBJECT_OK
+ )
+ ):
+
+ self._invoke_raise_load(state, passive, "raise")
+
+ session = _state_session(state)
+ if not session:
+ if passive & attributes.NO_RAISE:
+ return attributes.PASSIVE_NO_RESULT
+
+ raise orm_exc.DetachedInstanceError(
+ "Parent instance %s is not bound to a Session; "
+ "lazy load operation of attribute '%s' cannot proceed"
+ % (orm_util.state_str(state), self.key)
+ )
+
+ # if we have a simple primary key load, check the
+ # identity map without generating a Query at all
+ if use_get:
+ primary_key_identity = self._get_ident_for_use_get(
+ session, state, passive
+ )
+ if attributes.PASSIVE_NO_RESULT in primary_key_identity:
+ return attributes.PASSIVE_NO_RESULT
+ elif attributes.NEVER_SET in primary_key_identity:
+ return attributes.NEVER_SET
+
+ if _none_set.issuperset(primary_key_identity):
+ return None
+
+ if (
+ self.key in state.dict
+ and not passive & attributes.DEFERRED_HISTORY_LOAD
+ ):
+ return attributes.ATTR_WAS_SET
+
+ # look for this identity in the identity map. Delegate to the
+ # Query class in use, as it may have special rules for how it
+ # does this, including how it decides what the correct
+ # identity_token would be for this identity.
+
+ instance = session._identity_lookup(
+ self.entity,
+ primary_key_identity,
+ passive=passive,
+ lazy_loaded_from=state,
+ )
+
+ if instance is not None:
+ if instance is attributes.PASSIVE_CLASS_MISMATCH:
+ return None
+ else:
+ return instance
+ elif (
+ not passive & attributes.SQL_OK
+ or not passive & attributes.RELATED_OBJECT_OK
+ ):
+ return attributes.PASSIVE_NO_RESULT
+
+ return self._emit_lazyload(
+ session,
+ state,
+ primary_key_identity,
+ passive,
+ loadopt,
+ extra_criteria,
+ )
+
+ def _get_ident_for_use_get(self, session, state, passive):
+ instance_mapper = state.manager.mapper
+
+ if passive & attributes.LOAD_AGAINST_COMMITTED:
+ get_attr = instance_mapper._get_committed_state_attr_by_column
+ else:
+ get_attr = instance_mapper._get_state_attr_by_column
+
+ dict_ = state.dict
+
+ return [
+ get_attr(state, dict_, self._equated_columns[pk], passive=passive)
+ for pk in self.mapper.primary_key
+ ]
+
+ @util.preload_module("sqlalchemy.orm.strategy_options")
+ def _emit_lazyload(
+ self,
+ session,
+ state,
+ primary_key_identity,
+ passive,
+ loadopt,
+ extra_criteria,
+ ):
+ strategy_options = util.preloaded.orm_strategy_options
+
+ clauseelement = self.entity.__clause_element__()
+ stmt = Select._create_raw_select(
+ _raw_columns=[clauseelement],
+ _propagate_attrs=clauseelement._propagate_attrs,
+ _label_style=LABEL_STYLE_TABLENAME_PLUS_COL,
+ _compile_options=ORMCompileState.default_compile_options,
+ )
+ load_options = QueryContext.default_load_options
+
+ load_options += {
+ "_invoke_all_eagers": False,
+ "_lazy_loaded_from": state,
+ }
+
+ if self.parent_property.secondary is not None:
+ stmt = stmt.select_from(
+ self.mapper, self.parent_property.secondary
+ )
+
+ pending = not state.key
+
+ # don't autoflush on pending
+ if pending or passive & attributes.NO_AUTOFLUSH:
+ stmt._execution_options = util.immutabledict({"autoflush": False})
+
+ use_get = self.use_get
+
+ if state.load_options or (loadopt and loadopt._extra_criteria):
+ effective_path = state.load_path[self.parent_property]
+
+ opts = tuple(state.load_options)
+
+ if loadopt and loadopt._extra_criteria:
+ use_get = False
+ opts += (
+ orm_util.LoaderCriteriaOption(self.entity, extra_criteria),
+ )
+
+ stmt._with_options = opts
+ else:
+ # this path is used if there are not already any options
+ # in the query, but an event may want to add them
+ effective_path = state.mapper._path_registry[self.parent_property]
+
+ stmt._compile_options += {"_current_path": effective_path}
+
+ if use_get:
+ if self._raise_on_sql and not passive & attributes.NO_RAISE:
+ self._invoke_raise_load(state, passive, "raise_on_sql")
+
+ return loading.load_on_pk_identity(
+ session, stmt, primary_key_identity, load_options=load_options
+ )
+
+ if self._order_by:
+ stmt._order_by_clauses = self._order_by
+
+ def _lazyload_reverse(compile_context):
+ for rev in self.parent_property._reverse_property:
+ # reverse props that are MANYTOONE are loading *this*
+ # object from get(), so don't need to eager out to those.
+ if (
+ rev.direction is interfaces.MANYTOONE
+ and rev._use_get
+ and not isinstance(rev.strategy, LazyLoader)
+ ):
+ strategy_options.Load.for_existing_path(
+ compile_context.compile_options._current_path[
+ rev.parent
+ ]
+ ).lazyload(rev).process_compile_state(compile_context)
+
+ stmt._with_context_options += (
+ (_lazyload_reverse, self.parent_property),
+ )
+
+ lazy_clause, params = self._generate_lazy_clause(state, passive)
+
+ execution_options = {
+ "_sa_orm_load_options": load_options,
+ }
+
+ if (
+ self.key in state.dict
+ and not passive & attributes.DEFERRED_HISTORY_LOAD
+ ):
+ return attributes.ATTR_WAS_SET
+
+ if pending:
+ if util.has_intersection(orm_util._none_set, params.values()):
+ return None
+
+ elif util.has_intersection(orm_util._never_set, params.values()):
+ return None
+
+ if self._raise_on_sql and not passive & attributes.NO_RAISE:
+ self._invoke_raise_load(state, passive, "raise_on_sql")
+
+ stmt._where_criteria = (lazy_clause,)
+
+ result = session.execute(
+ stmt, params, execution_options=execution_options
+ )
+
+ result = result.unique().scalars().all()
+
+ if self.uselist:
+ return result
+ else:
+            num = len(result)
+            if num:
+                if num > 1:
+ util.warn(
+ "Multiple rows returned with "
+ "uselist=False for lazily-loaded attribute '%s' "
+ % self.parent_property
+ )
+
+ return result[0]
+ else:
+ return None
+
+ def create_row_processor(
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ ):
+ key = self.key
+
+ if not self.is_class_level or (loadopt and loadopt._extra_criteria):
+ # we are not the primary manager for this attribute
+ # on this class - set up a
+ # per-instance lazyloader, which will override the
+ # class-level behavior.
+ # this currently only happens when using a
+ # "lazyload" option on a "no load"
+ # attribute - "eager" attributes always have a
+ # class-level lazyloader installed.
+ set_lazy_callable = (
+ InstanceState._instance_level_callable_processor
+ )(
+ mapper.class_manager,
+ LoadLazyAttribute(
+ key,
+ self,
+ loadopt,
+ loadopt._generate_extra_criteria(context)
+ if loadopt._extra_criteria
+ else None,
+ ),
+ key,
+ )
+
+ populators["new"].append((self.key, set_lazy_callable))
+ elif context.populate_existing or mapper.always_refresh:
+
+ def reset_for_lazy_callable(state, dict_, row):
+ # we are the primary manager for this attribute on
+ # this class - reset its
+ # per-instance attribute state, so that the class-level
+ # lazy loader is
+ # executed when next referenced on this instance.
+ # this is needed in
+ # populate_existing() types of scenarios to reset
+ # any existing state.
+ state._reset(dict_, key)
+
+ populators["new"].append((self.key, reset_for_lazy_callable))
+
+
+class LoadLazyAttribute(object):
+ """semi-serializable loader object used by LazyLoader
+
+ Historically, this object would be carried along with instances that
+ needed to run lazyloaders, so it had to be serializable to support
+ cached instances.
+
+    This is no longer a general requirement, and the case where this object
+    is used is exactly the case where we can't really serialize easily,
+    which is when extra criteria in the loader option is present.
+
+    We can't reliably serialize that, as it refers to mapped entities and
+    AliasedClass objects that are local to the current process; these would
+    need to be matched up on deserialization, e.g. via the
+    sqlalchemy.ext.serializer approach.
+
+ """
+
+ def __init__(self, key, initiating_strategy, loadopt, extra_criteria):
+ self.key = key
+ self.strategy_key = initiating_strategy.strategy_key
+ self.loadopt = loadopt
+ self.extra_criteria = extra_criteria
+
+ def __getstate__(self):
+ if self.extra_criteria is not None:
+ util.warn(
+ "Can't reliably serialize a lazyload() option that "
+ "contains additional criteria; please use eager loading "
+ "for this case"
+ )
+ return {
+ "key": self.key,
+ "strategy_key": self.strategy_key,
+ "loadopt": self.loadopt,
+ "extra_criteria": (),
+ }
+
+ def __call__(self, state, passive=attributes.PASSIVE_OFF):
+ key = self.key
+ instance_mapper = state.manager.mapper
+ prop = instance_mapper._props[key]
+ strategy = prop._strategies[self.strategy_key]
+
+ return strategy._load_for_state(
+ state,
+ passive,
+ loadopt=self.loadopt,
+ extra_criteria=self.extra_criteria,
+ )
+
+
+class PostLoader(AbstractRelationshipLoader):
+ """A relationship loader that emits a second SELECT statement."""
+
+ def _check_recursive_postload(self, context, path, join_depth=None):
+ effective_path = (
+ context.compile_state.current_path or orm_util.PathRegistry.root
+ ) + path
+
+ if loading.PostLoad.path_exists(
+ context, effective_path, self.parent_property
+ ):
+ return True
+
+ path_w_prop = path[self.parent_property]
+ effective_path_w_prop = effective_path[self.parent_property]
+
+ if not path_w_prop.contains(context.attributes, "loader"):
+ if join_depth:
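+                # path entries alternate entity / relationship, so half
+                # the path length gives the number of relationship hops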
+ if effective_path_w_prop.length / 2 > join_depth:
+ return True
+ elif effective_path_w_prop.contains_mapper(self.mapper):
+ return True
+
+ return False
+
+ def _immediateload_create_row_processor(
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ ):
+ return self.parent_property._get_strategy(
+ (("lazy", "immediate"),)
+ ).create_row_processor(
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ )
+
+
+@relationships.RelationshipProperty.strategy_for(lazy="immediate")
+class ImmediateLoader(PostLoader):
+ __slots__ = ()
+
+ def init_class_attribute(self, mapper):
+ self.parent_property._get_strategy(
+ (("lazy", "select"),)
+ ).init_class_attribute(mapper)
+
+ def create_row_processor(
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ ):
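+        # note that "flags" below is resolved from the enclosing scope at
+        # call time; it is assigned further down, after this callable is
+        # defined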
+ def load_immediate(state, dict_, row):
+ state.get_impl(self.key).get(state, dict_, flags)
+
+ if self._check_recursive_postload(context, path):
+ # this will not emit SQL and will only emit for a many-to-one
+ # "use get" load. the "_RELATED" part means it may return
+ # instance even if its expired, since this is a mutually-recursive
+ # load operation.
+ flags = attributes.PASSIVE_NO_FETCH_RELATED | attributes.NO_RAISE
+ else:
+ flags = attributes.PASSIVE_OFF | attributes.NO_RAISE
+
+ populators["delayed"].append((self.key, load_immediate))
+
+
+@log.class_logger
+@relationships.RelationshipProperty.strategy_for(lazy="subquery")
+class SubqueryLoader(PostLoader):
+ __slots__ = ("join_depth",)
+
+ def __init__(self, parent, strategy_key):
+ super(SubqueryLoader, self).__init__(parent, strategy_key)
+ self.join_depth = self.parent_property.join_depth
+
+ def init_class_attribute(self, mapper):
+ self.parent_property._get_strategy(
+ (("lazy", "select"),)
+ ).init_class_attribute(mapper)
+
+ def _get_leftmost(
+ self,
+ orig_query_entity_index,
+ subq_path,
+ current_compile_state,
+ is_root,
+ ):
+ given_subq_path = subq_path
+ subq_path = subq_path.path
+ subq_mapper = orm_util._class_to_mapper(subq_path[0])
+
+ # determine attributes of the leftmost mapper
+ if (
+ self.parent.isa(subq_mapper)
+ and self.parent_property is subq_path[1]
+ ):
+ leftmost_mapper, leftmost_prop = self.parent, self.parent_property
+ else:
+ leftmost_mapper, leftmost_prop = subq_mapper, subq_path[1]
+
+ if is_root:
+ # the subq_path is also coming from cached state, so when we start
+ # building up this path, it has to also be converted to be in terms
+ # of the current state. this is for the specific case of the entity
+ # is an AliasedClass against a subquery that's not otherwise going
+ # to adapt
+ new_subq_path = current_compile_state._entities[
+ orig_query_entity_index
+ ].entity_zero._path_registry[leftmost_prop]
+ additional = len(subq_path) - len(new_subq_path)
+ if additional:
+ new_subq_path += path_registry.PathRegistry.coerce(
+ subq_path[-additional:]
+ )
+ else:
+ new_subq_path = given_subq_path
+
+ leftmost_cols = leftmost_prop.local_columns
+
+ leftmost_attr = [
+ getattr(
+ new_subq_path.path[0].entity,
+ leftmost_mapper._columntoproperty[c].key,
+ )
+ for c in leftmost_cols
+ ]
+
+ return leftmost_mapper, leftmost_attr, leftmost_prop, new_subq_path
+
+ def _generate_from_original_query(
+ self,
+ orig_compile_state,
+ orig_query,
+ leftmost_mapper,
+ leftmost_attr,
+ leftmost_relationship,
+ orig_entity,
+ ):
+ # reformat the original query
+ # to look only for significant columns
+ q = orig_query._clone().correlate(None)
+
+ # LEGACY: make a Query back from the select() !!
+ # This suits at least two legacy cases:
+ # 1. applications which expect before_compile() to be called
+ # below when we run .subquery() on this query (Keystone)
+ # 2. applications which are doing subqueryload with complex
+ # from_self() queries, as query.subquery() / .statement
+ # has to do the full compile context for multiply-nested
+ # from_self() (Neutron) - see test_subqload_from_self
+ # for demo.
+ q2 = query.Query.__new__(query.Query)
+ q2.__dict__.update(q.__dict__)
+ q = q2
+
+ # set the query's "FROM" list explicitly to what the
+ # FROM list would be in any case, as we will be limiting
+ # the columns in the SELECT list which may no longer include
+ # all entities mentioned in things like WHERE, JOIN, etc.
+ if not q._from_obj:
+ q._enable_assertions = False
+ q.select_from.non_generative(
+ q,
+ *{
+ ent["entity"]
+ for ent in _column_descriptions(
+ orig_query, compile_state=orig_compile_state
+ )
+ if ent["entity"] is not None
+ }
+ )
+
+ # select from the identity columns of the outer (specifically, these
+ # are the 'local_cols' of the property). This will remove other
+ # columns from the query that might suggest the right entity which is
+ # why we do set select_from above. The attributes we have are
+ # coerced and adapted using the original query's adapter, which is
+ # needed only for the case of adapting a subclass column to
+ # that of a polymorphic selectable, e.g. we have
+ # Engineer.primary_language and the entity is Person. All other
+ # adaptations, e.g. from_self, select_entity_from(), will occur
+ # within the new query when it compiles, as the compile_state we are
+ # using here is only a partial one. If the subqueryload is from a
+ # with_polymorphic() or other aliased() object, left_attr will already
+ # be the correct attributes so no adaptation is needed.
+ target_cols = orig_compile_state._adapt_col_list(
+ [
+ sql.coercions.expect(sql.roles.ColumnsClauseRole, o)
+ for o in leftmost_attr
+ ],
+ orig_compile_state._get_current_adapter(),
+ )
+ q._raw_columns = target_cols
+
+ distinct_target_key = leftmost_relationship.distinct_target_key
+
+ if distinct_target_key is True:
+ q._distinct = True
+ elif distinct_target_key is None:
+ # if target_cols refer to a non-primary key or only
+ # part of a composite primary key, set the q as distinct
+ for t in set(c.table for c in target_cols):
+ if not set(target_cols).issuperset(t.primary_key):
+ q._distinct = True
+ break
+
+ # don't need ORDER BY if no limit/offset
+ if not q._has_row_limiting_clause:
+ q._order_by_clauses = ()
+
+ if q._distinct is True and q._order_by_clauses:
+ # the logic to automatically add the order by columns to the query
+ # when distinct is True is deprecated in the query
+ to_add = sql_util.expand_column_list_from_order_by(
+ target_cols, q._order_by_clauses
+ )
+ if to_add:
+ q._set_entities(target_cols + to_add)
+
+ # the original query now becomes a subquery
+ # which we'll join onto.
+ # LEGACY: as "q" is a Query, the before_compile() event is invoked
+ # here.
+ embed_q = q.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).subquery()
+ left_alias = orm_util.AliasedClass(
+ leftmost_mapper, embed_q, use_mapper_path=True
+ )
+ return left_alias
+
+ def _prep_for_joins(self, left_alias, subq_path):
+ # figure out what's being joined. a.k.a. the fun part
+ to_join = []
+ pairs = list(subq_path.pairs())
+
+ for i, (mapper, prop) in enumerate(pairs):
+ if i > 0:
+ # look at the previous mapper in the chain -
+ # if it is as or more specific than this prop's
+ # mapper, use that instead.
+ # note we have an assumption here that
+ # the non-first element is always going to be a mapper,
+ # not an AliasedClass
+
+ prev_mapper = pairs[i - 1][1].mapper
+ to_append = prev_mapper if prev_mapper.isa(mapper) else mapper
+ else:
+ to_append = mapper
+
+ to_join.append((to_append, prop.key))
+
+ # determine the immediate parent class we are joining from,
+ # which needs to be aliased.
+
+ if len(to_join) < 2:
+ # in the case of a one level eager load, this is the
+ # leftmost "left_alias".
+ parent_alias = left_alias
+ else:
+ info = inspect(to_join[-1][0])
+ if info.is_aliased_class:
+ parent_alias = info.entity
+ else:
+ # alias a plain mapper as we may be
+ # joining multiple times
+ parent_alias = orm_util.AliasedClass(
+ info.entity, use_mapper_path=True
+ )
+
+ local_cols = self.parent_property.local_columns
+
+ local_attr = [
+ getattr(parent_alias, self.parent._columntoproperty[c].key)
+ for c in local_cols
+ ]
+ return to_join, local_attr, parent_alias
+
+ def _apply_joins(
+ self, q, to_join, left_alias, parent_alias, effective_entity
+ ):
+
+ ltj = len(to_join)
+ if ltj == 1:
+ to_join = [
+ getattr(left_alias, to_join[0][1]).of_type(effective_entity)
+ ]
+ elif ltj == 2:
+ to_join = [
+ getattr(left_alias, to_join[0][1]).of_type(parent_alias),
+ getattr(parent_alias, to_join[-1][1]).of_type(
+ effective_entity
+ ),
+ ]
+ elif ltj > 2:
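+            # longer chains: alias each intermediate mapper so the same
+            # entity may be joined more than once, then chain each hop to
+            # the next using of_type()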
+ middle = [
+ (
+ orm_util.AliasedClass(item[0])
+ if not inspect(item[0]).is_aliased_class
+ else item[0].entity,
+ item[1],
+ )
+ for item in to_join[1:-1]
+ ]
+ inner = []
+
+ while middle:
+ item = middle.pop(0)
+ attr = getattr(item[0], item[1])
+ if middle:
+ attr = attr.of_type(middle[0][0])
+ else:
+ attr = attr.of_type(parent_alias)
+
+ inner.append(attr)
+
+ to_join = (
+ [getattr(left_alias, to_join[0][1]).of_type(inner[0].parent)]
+ + inner
+ + [
+ getattr(parent_alias, to_join[-1][1]).of_type(
+ effective_entity
+ )
+ ]
+ )
+
+ for attr in to_join:
+ q = q.join(attr)
+
+ return q
+
+ def _setup_options(
+ self,
+ context,
+ q,
+ subq_path,
+ rewritten_path,
+ orig_query,
+ effective_entity,
+ loadopt,
+ ):
+
+ # note that because the subqueryload object
+ # does not re-use the cached query, instead always making
+ # use of the current invoked query, while we have two queries
+ # here (orig and context.query), they are both non-cached
+ # queries and we can transfer the options as is without
+ # adjusting for new criteria. Some work on #6881 / #6889
+ # brought this into question.
+ new_options = orig_query._with_options
+
+ if loadopt and loadopt._extra_criteria:
+
+ new_options += (
+ orm_util.LoaderCriteriaOption(
+ self.entity,
+ loadopt._generate_extra_criteria(context),
+ ),
+ )
+
+ # propagate loader options etc. to the new query.
+ # these will fire relative to subq_path.
+ q = q._with_current_path(rewritten_path)
+ q = q.options(*new_options)
+
+ return q
+
+ def _setup_outermost_orderby(self, q):
+ if self.parent_property.order_by:
+
+ def _setup_outermost_orderby(compile_context):
+ compile_context.eager_order_by += tuple(
+ util.to_list(self.parent_property.order_by)
+ )
+
+ q = q._add_context_option(
+ _setup_outermost_orderby, self.parent_property
+ )
+
+ return q
+
+ class _SubqCollections(object):
+ """Given a :class:`_query.Query` used to emit the "subquery load",
+ provide a load interface that executes the query at the
+ first moment a value is needed.
+
+ """
+
+ __slots__ = (
+ "session",
+ "execution_options",
+ "load_options",
+ "params",
+ "subq",
+ "_data",
+ )
+
+ def __init__(self, context, subq):
+            # avoid creating a cycle by not storing the context itself,
+            # even though that would be preferable
+ self.session = context.session
+ self.execution_options = context.execution_options
+ self.load_options = context.load_options
+ self.params = context.params or {}
+ self.subq = subq
+ self._data = None
+
+ def get(self, key, default):
+ if self._data is None:
+ self._load()
+ return self._data.get(key, default)
+
+ def _load(self):
+ self._data = collections.defaultdict(list)
+
+ q = self.subq
+ assert q.session is None
+
+ q = q.with_session(self.session)
+
+ if self.load_options._populate_existing:
+ q = q.populate_existing()
+ # to work with baked query, the parameters may have been
+ # updated since this query was created, so take these into account
+
+ rows = list(q.params(self.params))
+ for k, v in itertools.groupby(rows, lambda x: x[1:]):
+ self._data[k].extend(vv[0] for vv in v)
+
+ def loader(self, state, dict_, row):
+ if self._data is None:
+ self._load()
+
+ def _setup_query_from_rowproc(
+ self,
+ context,
+ query_entity,
+ path,
+ entity,
+ loadopt,
+ adapter,
+ ):
+ compile_state = context.compile_state
+ if (
+ not compile_state.compile_options._enable_eagerloads
+ or compile_state.compile_options._for_refresh_state
+ ):
+ return
+
+ orig_query_entity_index = compile_state._entities.index(query_entity)
+ context.loaders_require_buffering = True
+
+ path = path[self.parent_property]
+
+ # build up a path indicating the path from the leftmost
+ # entity to the thing we're subquery loading.
+ with_poly_entity = path.get(
+ compile_state.attributes, "path_with_polymorphic", None
+ )
+ if with_poly_entity is not None:
+ effective_entity = with_poly_entity
+ else:
+ effective_entity = self.entity
+
+ subq_path, rewritten_path = context.query._execution_options.get(
+ ("subquery_paths", None),
+ (orm_util.PathRegistry.root, orm_util.PathRegistry.root),
+ )
+ is_root = subq_path is orm_util.PathRegistry.root
+ subq_path = subq_path + path
+ rewritten_path = rewritten_path + path
+
+ # if not via query option, check for
+ # a cycle
+ # TODO: why is this here??? this is now handled
+ # by the _check_recursive_postload call
+ if not path.contains(compile_state.attributes, "loader"):
+ if self.join_depth:
+ if (
+ (
+ compile_state.current_path.length
+ if compile_state.current_path
+ else 0
+ )
+ + path.length
+ ) / 2 > self.join_depth:
+ return
+ elif subq_path.contains_mapper(self.mapper):
+ return
+
+ # use the current query being invoked, not the compile state
+ # one. this is so that we get the current parameters. however,
+ # it means we can't use the existing compile state, we have to make
+ # a new one. other approaches include possibly using the
+ # compiled query but swapping the params, seems only marginally
+ # less time spent but more complicated
+ orig_query = context.query._execution_options.get(
+ ("orig_query", SubqueryLoader), context.query
+ )
+
+ # make a new compile_state for the query that's probably cached, but
+ # we're sort of undoing a bit of that caching :(
+ compile_state_cls = ORMCompileState._get_plugin_class_for_plugin(
+ orig_query, "orm"
+ )
+
+ if orig_query._is_lambda_element:
+ if context.load_options._lazy_loaded_from is None:
+ util.warn(
+ 'subqueryloader for "%s" must invoke lambda callable '
+ "at %r in "
+ "order to produce a new query, decreasing the efficiency "
+ "of caching for this statement. Consider using "
+ "selectinload() for more effective full-lambda caching"
+ % (self, orig_query)
+ )
+ orig_query = orig_query._resolved
+
+ # this is the more "quick" version, however it's not clear how
+ # much of this we need. in particular I can't get a test to
+ # fail if the "set_base_alias" is missing and not sure why that is.
+ orig_compile_state = compile_state_cls._create_entities_collection(
+ orig_query, legacy=False
+ )
+
+ (
+ leftmost_mapper,
+ leftmost_attr,
+ leftmost_relationship,
+ rewritten_path,
+ ) = self._get_leftmost(
+ orig_query_entity_index,
+ rewritten_path,
+ orig_compile_state,
+ is_root,
+ )
+
+ # generate a new Query from the original, then
+ # produce a subquery from it.
+ left_alias = self._generate_from_original_query(
+ orig_compile_state,
+ orig_query,
+ leftmost_mapper,
+ leftmost_attr,
+ leftmost_relationship,
+ entity,
+ )
+
+ # generate another Query that will join the
+ # left alias to the target relationships.
+ # basically doing a longhand
+ # "from_self()". (from_self() itself not quite industrial
+ # strength enough for all contingencies...but very close)
+
+ q = query.Query(effective_entity)
+
+ q._execution_options = q._execution_options.union(
+ {
+ ("orig_query", SubqueryLoader): orig_query,
+ ("subquery_paths", None): (subq_path, rewritten_path),
+ }
+ )
+
+ q = q._set_enable_single_crit(False)
+ to_join, local_attr, parent_alias = self._prep_for_joins(
+ left_alias, subq_path
+ )
+
+ q = q.add_columns(*local_attr)
+ q = self._apply_joins(
+ q, to_join, left_alias, parent_alias, effective_entity
+ )
+
+ q = self._setup_options(
+ context,
+ q,
+ subq_path,
+ rewritten_path,
+ orig_query,
+ effective_entity,
+ loadopt,
+ )
+ q = self._setup_outermost_orderby(q)
+
+ return q
+
+ def create_row_processor(
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ ):
+
+ if context.refresh_state:
+ return self._immediateload_create_row_processor(
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ )
+        # unlike the other post loaders, the subqueryloader does a similar
+        # check in setup_query(); it is repeated here for consistency
+ elif self._check_recursive_postload(context, path, self.join_depth):
+ return
+ elif not isinstance(context.compile_state, ORMSelectCompileState):
+ # issue 7505 - subqueryload() in 1.3 and previous would silently
+ # degrade for from_statement() without warning. this behavior
+ # is restored here
+ return
+
+ if not self.parent.class_manager[self.key].impl.supports_population:
+ raise sa_exc.InvalidRequestError(
+ "'%s' does not support object "
+ "population - eager loading cannot be applied." % self
+ )
+
+ # a little dance here as the "path" is still something that only
+ # semi-tracks the exact series of things we are loading, still not
+ # telling us about with_polymorphic() and stuff like that when it's at
+ # the root.. the initial MapperEntity is more accurate for this case.
+ if len(path) == 1:
+ if not orm_util._entity_isa(query_entity.entity_zero, self.parent):
+ return
+ elif not orm_util._entity_isa(path[-1], self.parent):
+ return
+
+ subq = self._setup_query_from_rowproc(
+ context,
+ query_entity,
+ path,
+ path[-1],
+ loadopt,
+ adapter,
+ )
+
+ if subq is None:
+ return
+
+ assert subq.session is None
+
+ path = path[self.parent_property]
+
+ local_cols = self.parent_property.local_columns
+
+ # cache the loaded collections in the context
+ # so that inheriting mappers don't re-load when they
+ # call upon create_row_processor again
+ collections = path.get(context.attributes, "collections")
+ if collections is None:
+ collections = self._SubqCollections(context, subq)
+ path.set(context.attributes, "collections", collections)
+
+ if adapter:
+ local_cols = [adapter.columns[c] for c in local_cols]
+
+ if self.uselist:
+ self._create_collection_loader(
+ context, result, collections, local_cols, populators
+ )
+ else:
+ self._create_scalar_loader(
+ context, result, collections, local_cols, populators
+ )
+
+ def _create_collection_loader(
+ self, context, result, collections, local_cols, populators
+ ):
+ tuple_getter = result._tuple_getter(local_cols)
+
+ def load_collection_from_subq(state, dict_, row):
+ collection = collections.get(tuple_getter(row), ())
+ state.get_impl(self.key).set_committed_value(
+ state, dict_, collection
+ )
+
+ def load_collection_from_subq_existing_row(state, dict_, row):
+ if self.key not in dict_:
+ load_collection_from_subq(state, dict_, row)
+
+ populators["new"].append((self.key, load_collection_from_subq))
+ populators["existing"].append(
+ (self.key, load_collection_from_subq_existing_row)
+ )
+
+ if context.invoke_all_eagers:
+ populators["eager"].append((self.key, collections.loader))
+
+ def _create_scalar_loader(
+ self, context, result, collections, local_cols, populators
+ ):
+ tuple_getter = result._tuple_getter(local_cols)
+
+ def load_scalar_from_subq(state, dict_, row):
+ collection = collections.get(tuple_getter(row), (None,))
+ if len(collection) > 1:
+ util.warn(
+ "Multiple rows returned with "
+ "uselist=False for eagerly-loaded attribute '%s' " % self
+ )
+
+ scalar = collection[0]
+ state.get_impl(self.key).set_committed_value(state, dict_, scalar)
+
+ def load_scalar_from_subq_existing_row(state, dict_, row):
+ if self.key not in dict_:
+ load_scalar_from_subq(state, dict_, row)
+
+ populators["new"].append((self.key, load_scalar_from_subq))
+ populators["existing"].append(
+ (self.key, load_scalar_from_subq_existing_row)
+ )
+ if context.invoke_all_eagers:
+ populators["eager"].append((self.key, collections.loader))
+
+
+@log.class_logger
+@relationships.RelationshipProperty.strategy_for(lazy="joined")
+@relationships.RelationshipProperty.strategy_for(lazy=False)
+class JoinedLoader(AbstractRelationshipLoader):
+ """Provide loading behavior for a :class:`.RelationshipProperty`
+ using joined eager loading.
+
+ """
+
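+    # illustrative usage, an assumption not taken from the original source:
+    #
+    #     session.query(Parent).options(joinedload(Parent.children))
+    #
+    # renders the related table as a LEFT OUTER JOIN against an anonymous
+    # alias, so parent rows and their children arrive in one round trip.
+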
+ __slots__ = "join_depth", "_aliased_class_pool"
+
+ def __init__(self, parent, strategy_key):
+ super(JoinedLoader, self).__init__(parent, strategy_key)
+ self.join_depth = self.parent_property.join_depth
+ self._aliased_class_pool = []
+
+ def init_class_attribute(self, mapper):
+ self.parent_property._get_strategy(
+ (("lazy", "select"),)
+ ).init_class_attribute(mapper)
+
+ def setup_query(
+ self,
+ compile_state,
+ query_entity,
+ path,
+ loadopt,
+ adapter,
+ column_collection=None,
+ parentmapper=None,
+ chained_from_outerjoin=False,
+ **kwargs
+ ):
+ """Add a left outer join to the statement that's being constructed."""
+
+ if not compile_state.compile_options._enable_eagerloads:
+ return
+ elif self.uselist:
+ compile_state.multi_row_eager_loaders = True
+
+ path = path[self.parent_property]
+
+ with_polymorphic = None
+
+ user_defined_adapter = (
+ self._init_user_defined_eager_proc(
+ loadopt, compile_state, compile_state.attributes
+ )
+ if loadopt
+ else False
+ )
+
+ if user_defined_adapter is not False:
+ (
+ clauses,
+ adapter,
+ add_to_collection,
+ ) = self._setup_query_on_user_defined_adapter(
+ compile_state,
+ query_entity,
+ path,
+ adapter,
+ user_defined_adapter,
+ )
+ else:
+ # if not via query option, check for
+ # a cycle
+ if not path.contains(compile_state.attributes, "loader"):
+ if self.join_depth:
+ if path.length / 2 > self.join_depth:
+ return
+ elif path.contains_mapper(self.mapper):
+ return
+
+ (
+ clauses,
+ adapter,
+ add_to_collection,
+ chained_from_outerjoin,
+ ) = self._generate_row_adapter(
+ compile_state,
+ query_entity,
+ path,
+ loadopt,
+ adapter,
+ column_collection,
+ parentmapper,
+ chained_from_outerjoin,
+ )
+
+ with_poly_entity = path.get(
+ compile_state.attributes, "path_with_polymorphic", None
+ )
+ if with_poly_entity is not None:
+ with_polymorphic = inspect(
+ with_poly_entity
+ ).with_polymorphic_mappers
+ else:
+ with_polymorphic = None
+
+ path = path[self.entity]
+
+ loading._setup_entity_query(
+ compile_state,
+ self.mapper,
+ query_entity,
+ path,
+ clauses,
+ add_to_collection,
+ with_polymorphic=with_polymorphic,
+ parentmapper=self.mapper,
+ chained_from_outerjoin=chained_from_outerjoin,
+ )
+
+ if with_poly_entity is not None and None in set(
+ compile_state.secondary_columns
+ ):
+ raise sa_exc.InvalidRequestError(
+ "Detected unaliased columns when generating joined "
+ "load. Make sure to use aliased=True or flat=True "
+ "when using joined loading with with_polymorphic()."
+ )
+
+ def _init_user_defined_eager_proc(
+ self, loadopt, compile_state, target_attributes
+ ):
+
+ # check if the opt applies at all
+ if "eager_from_alias" not in loadopt.local_opts:
+ # nope
+ return False
+
+ path = loadopt.path.parent
+
+ # the option applies. check if the "user_defined_eager_row_processor"
+ # has been built up.
+ adapter = path.get(
+ compile_state.attributes, "user_defined_eager_row_processor", False
+ )
+ if adapter is not False:
+ # just return it
+ return adapter
+
+ # otherwise figure it out.
+ alias = loadopt.local_opts["eager_from_alias"]
+ root_mapper, prop = path[-2:]
+
+ if alias is not None:
+ if isinstance(alias, str):
+ alias = prop.target.alias(alias)
+ adapter = sql_util.ColumnAdapter(
+ alias, equivalents=prop.mapper._equivalent_columns
+ )
+ else:
+ if path.contains(
+ compile_state.attributes, "path_with_polymorphic"
+ ):
+ with_poly_entity = path.get(
+ compile_state.attributes, "path_with_polymorphic"
+ )
+ adapter = orm_util.ORMAdapter(
+ with_poly_entity,
+ equivalents=prop.mapper._equivalent_columns,
+ )
+ else:
+ adapter = compile_state._polymorphic_adapters.get(
+ prop.mapper, None
+ )
+ path.set(
+ target_attributes,
+ "user_defined_eager_row_processor",
+ adapter,
+ )
+
+ return adapter
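+
+    # illustrative usage, an assumption not taken from the original source:
+    # the "eager_from_alias" opt originates from contains_eager() given an
+    # explicit alias, e.g.
+    #
+    #     adalias = aliased(Address)
+    #     q = (
+    #         session.query(User)
+    #         .outerjoin(adalias, User.addresses)
+    #         .options(contains_eager(User.addresses, alias=adalias))
+    #     )
+    #
+    # in which case this method resolves the adapter that maps Address
+    # columns onto the alias already present in the query.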
+
+ def _setup_query_on_user_defined_adapter(
+ self, context, entity, path, adapter, user_defined_adapter
+ ):
+
+ # apply some more wrapping to the "user defined adapter"
+ # if we are setting up the query for SQL render.
+ adapter = entity._get_entity_clauses(context)
+
+ if adapter and user_defined_adapter:
+ user_defined_adapter = user_defined_adapter.wrap(adapter)
+ path.set(
+ context.attributes,
+ "user_defined_eager_row_processor",
+ user_defined_adapter,
+ )
+ elif adapter:
+ user_defined_adapter = adapter
+ path.set(
+ context.attributes,
+ "user_defined_eager_row_processor",
+ user_defined_adapter,
+ )
+
+ add_to_collection = context.primary_columns
+ return user_defined_adapter, adapter, add_to_collection
+
+ def _gen_pooled_aliased_class(self, context):
+ # keep a local pool of AliasedClass objects that get re-used.
+ # we need one unique AliasedClass per query per appearance of our
+ # entity in the query.
+
+ if inspect(self.entity).is_aliased_class:
+ alt_selectable = inspect(self.entity).selectable
+ else:
+ alt_selectable = None
+
+ key = ("joinedloader_ac", self)
+ if key not in context.attributes:
+ context.attributes[key] = idx = 0
+ else:
+ context.attributes[key] = idx = context.attributes[key] + 1
+
+ if idx >= len(self._aliased_class_pool):
+ to_adapt = orm_util.AliasedClass(
+ self.mapper,
+ alias=alt_selectable._anonymous_fromclause(flat=True)
+ if alt_selectable is not None
+ else None,
+ flat=True,
+ use_mapper_path=True,
+ )
+
+ # load up the .columns collection on the Alias() before
+ # the object becomes shared among threads. this prevents
+ # races for column identities.
+ inspect(to_adapt).selectable.c
+ self._aliased_class_pool.append(to_adapt)
+
+ return self._aliased_class_pool[idx]
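+
+    # illustrative note, not part of the original source: if the same
+    # relationship appears twice in one statement (e.g. a self-referential
+    # chain of joinedloads), idx counts 0, 1, ... per appearance and the
+    # pool grows lazily, so each appearance reuses a stable AliasedClass
+    # across repeated compilations of the statement.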
+
+ def _generate_row_adapter(
+ self,
+ compile_state,
+ entity,
+ path,
+ loadopt,
+ adapter,
+ column_collection,
+ parentmapper,
+ chained_from_outerjoin,
+ ):
+ with_poly_entity = path.get(
+ compile_state.attributes, "path_with_polymorphic", None
+ )
+ if with_poly_entity:
+ to_adapt = with_poly_entity
+ else:
+ to_adapt = self._gen_pooled_aliased_class(compile_state)
+
+ clauses = inspect(to_adapt)._memo(
+ ("joinedloader_ormadapter", self),
+ orm_util.ORMAdapter,
+ to_adapt,
+ equivalents=self.mapper._equivalent_columns,
+ adapt_required=True,
+ allow_label_resolve=False,
+ anonymize_labels=True,
+ )
+
+ assert clauses.aliased_class is not None
+
+ innerjoin = (
+ loadopt.local_opts.get("innerjoin", self.parent_property.innerjoin)
+ if loadopt is not None
+ else self.parent_property.innerjoin
+ )
+
+ if not innerjoin:
+ # if this is an outer join, all non-nested eager joins from
+ # this path must also be outer joins
+ chained_from_outerjoin = True
+
+ compile_state.create_eager_joins.append(
+ (
+ self._create_eager_join,
+ entity,
+ path,
+ adapter,
+ parentmapper,
+ clauses,
+ innerjoin,
+ chained_from_outerjoin,
+ loadopt._extra_criteria if loadopt else (),
+ )
+ )
+
+ add_to_collection = compile_state.secondary_columns
+ path.set(compile_state.attributes, "eager_row_processor", clauses)
+
+ return clauses, adapter, add_to_collection, chained_from_outerjoin
+
+ def _create_eager_join(
+ self,
+ compile_state,
+ query_entity,
+ path,
+ adapter,
+ parentmapper,
+ clauses,
+ innerjoin,
+ chained_from_outerjoin,
+ extra_criteria,
+ ):
+ if parentmapper is None:
+ localparent = query_entity.mapper
+ else:
+ localparent = parentmapper
+
+ # whether or not the Query will wrap the selectable in a subquery,
+ # and then attach eager load joins to that (i.e., in the case of
+ # LIMIT/OFFSET etc.)
+ should_nest_selectable = (
+ compile_state.multi_row_eager_loaders
+ and compile_state._should_nest_selectable
+ )
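+
+        # illustrative sketch, not part of the original source: with
+        # nesting, a LIMITed query with a collection eager load renders
+        # approximately as
+        #
+        #     SELECT anon.*, child.*
+        #     FROM (SELECT parent.id AS id, ... LIMIT 5) AS anon
+        #     LEFT OUTER JOIN child ON anon.id = child.parent_id
+        #
+        # so the LIMIT applies to parent rows and not to the joined
+        # child rows.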
+
+ query_entity_key = None
+
+ if (
+ query_entity not in compile_state.eager_joins
+ and not should_nest_selectable
+ and compile_state.from_clauses
+ ):
+
+ indexes = sql_util.find_left_clause_that_matches_given(
+ compile_state.from_clauses, query_entity.selectable
+ )
+
+ if len(indexes) > 1:
+ # for the eager load case, I can't reproduce this right
+ # now. For query.join() I can.
+ raise sa_exc.InvalidRequestError(
+ "Can't identify which query entity in which to joined "
+ "eager load from. Please use an exact match when "
+ "specifying the join path."
+ )
+
+ if indexes:
+ clause = compile_state.from_clauses[indexes[0]]
+ # join to an existing FROM clause on the query.
+ # key it to its list index in the eager_joins dict.
+ # Query._compile_context will adapt as needed and
+ # append to the FROM clause of the select().
+ query_entity_key, default_towrap = indexes[0], clause
+
+ if query_entity_key is None:
+ query_entity_key, default_towrap = (
+ query_entity,
+ query_entity.selectable,
+ )
+
+ towrap = compile_state.eager_joins.setdefault(
+ query_entity_key, default_towrap
+ )
+
+ if adapter:
+ if getattr(adapter, "aliased_class", None):
+ # joining from an adapted entity. The adapted entity
+ # might be a "with_polymorphic", so resolve that to our
+ # specific mapper's entity before looking for our attribute
+ # name on it.
+ efm = inspect(adapter.aliased_class)._entity_for_mapper(
+ localparent
+ if localparent.isa(self.parent)
+ else self.parent
+ )
+
+ # look for our attribute on the adapted entity, else fall back
+ # to our straight property
+ onclause = getattr(efm.entity, self.key, self.parent_property)
+ else:
+ onclause = getattr(
+ orm_util.AliasedClass(
+ self.parent, adapter.selectable, use_mapper_path=True
+ ),
+ self.key,
+ self.parent_property,
+ )
+
+ else:
+ onclause = self.parent_property
+
+ assert clauses.aliased_class is not None
+
+ attach_on_outside = (
+ not chained_from_outerjoin
+ or not innerjoin
+ or innerjoin == "unnested"
+ or query_entity.entity_zero.represents_outer_join
+ )
+
+ extra_join_criteria = extra_criteria
+ additional_entity_criteria = compile_state.global_attributes.get(
+ ("additional_entity_criteria", self.mapper), ()
+ )
+ if additional_entity_criteria:
+ extra_join_criteria += tuple(
+ ae._resolve_where_criteria(self.mapper)
+ for ae in additional_entity_criteria
+ if ae.propagate_to_loaders
+ )
+
+ if attach_on_outside:
+ # this is the "classic" eager join case.
+ eagerjoin = orm_util._ORMJoin(
+ towrap,
+ clauses.aliased_class,
+ onclause,
+ isouter=not innerjoin
+ or query_entity.entity_zero.represents_outer_join
+ or (chained_from_outerjoin and isinstance(towrap, sql.Join)),
+ _left_memo=self.parent,
+ _right_memo=self.mapper,
+ _extra_criteria=extra_join_criteria,
+ )
+ else:
+ # all other cases are innerjoin=='nested' approach
+ eagerjoin = self._splice_nested_inner_join(
+ path, towrap, clauses, onclause, extra_join_criteria
+ )
+
+ compile_state.eager_joins[query_entity_key] = eagerjoin
+
+ # send a hint to the Query as to where it may "splice" this join
+ eagerjoin.stop_on = query_entity.selectable
+
+ if not parentmapper:
+ # for parentclause that is the non-eager end of the join,
+ # ensure all the parent cols in the primaryjoin are actually
+ # in the
+ # columns clause (i.e. are not deferred), so that aliasing applied
+ # by the Query propagates those columns outward.
+ # This has the effect
+ # of "undefering" those columns.
+ for col in sql_util._find_columns(
+ self.parent_property.primaryjoin
+ ):
+ if localparent.persist_selectable.c.contains_column(col):
+ if adapter:
+ col = adapter.columns[col]
+ compile_state._append_dedupe_col_collection(
+ col, compile_state.primary_columns
+ )
+
+ if self.parent_property.order_by:
+ compile_state.eager_order_by += tuple(
+ (eagerjoin._target_adapter.copy_and_process)(
+ util.to_list(self.parent_property.order_by)
+ )
+ )
+
+ def _splice_nested_inner_join(
+ self, path, join_obj, clauses, onclause, extra_criteria, splicing=False
+ ):
+
+ if splicing is False:
+ # first call is always handed a join object
+ # from the outside
+ assert isinstance(join_obj, orm_util._ORMJoin)
+ elif isinstance(join_obj, sql.selectable.FromGrouping):
+ return self._splice_nested_inner_join(
+ path,
+ join_obj.element,
+ clauses,
+ onclause,
+ extra_criteria,
+ splicing,
+ )
+ elif not isinstance(join_obj, orm_util._ORMJoin):
+ if path[-2] is splicing:
+ return orm_util._ORMJoin(
+ join_obj,
+ clauses.aliased_class,
+ onclause,
+ isouter=False,
+ _left_memo=splicing,
+ _right_memo=path[-1].mapper,
+ _extra_criteria=extra_criteria,
+ )
+ else:
+ # only here if splicing == True
+ return None
+
+ target_join = self._splice_nested_inner_join(
+ path,
+ join_obj.right,
+ clauses,
+ onclause,
+ extra_criteria,
+ join_obj._right_memo,
+ )
+ if target_join is None:
+ right_splice = False
+ target_join = self._splice_nested_inner_join(
+ path,
+ join_obj.left,
+ clauses,
+ onclause,
+ extra_criteria,
+ join_obj._left_memo,
+ )
+ if target_join is None:
+ # should only return None when recursively called,
+ # e.g. splicing==True
+ assert (
+ splicing is not False
+ ), "assertion failed attempting to produce joined eager loads"
+ return None
+ else:
+ right_splice = True
+
+ if right_splice:
+ # for a right splice, attempt to flatten out
+ # a JOIN b JOIN c JOIN .. to avoid needless
+ # parenthesis nesting
+ if not join_obj.isouter and not target_join.isouter:
+ eagerjoin = join_obj._splice_into_center(target_join)
+ else:
+ eagerjoin = orm_util._ORMJoin(
+ join_obj.left,
+ target_join,
+ join_obj.onclause,
+ isouter=join_obj.isouter,
+ _left_memo=join_obj._left_memo,
+ )
+ else:
+ eagerjoin = orm_util._ORMJoin(
+ target_join,
+ join_obj.right,
+ join_obj.onclause,
+ isouter=join_obj.isouter,
+ _right_memo=join_obj._right_memo,
+ )
+
+ eagerjoin._target_adapter = target_join._target_adapter
+ return eagerjoin
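+
+    # illustrative sketch, not part of the original source: for
+    # innerjoin="nested", splicing an INNER join for b->c into an existing
+    # OUTER join a->b produces
+    #
+    #     a LEFT OUTER JOIN (b JOIN c ON ...) ON ...
+    #
+    # while two INNER joins are flattened via _splice_into_center() into
+    #
+    #     a JOIN b ON ... JOIN c ON ...
+    #
+    # avoiding needless parenthesization.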
+
+ def _create_eager_adapter(self, context, result, adapter, path, loadopt):
+ compile_state = context.compile_state
+
+ user_defined_adapter = (
+ self._init_user_defined_eager_proc(
+ loadopt, compile_state, context.attributes
+ )
+ if loadopt
+ else False
+ )
+
+ if user_defined_adapter is not False:
+ decorator = user_defined_adapter
+ # user defined eagerloads are part of the "primary"
+ # portion of the load.
+ # the adapters applied to the Query should be honored.
+ if compile_state.compound_eager_adapter and decorator:
+ decorator = decorator.wrap(
+ compile_state.compound_eager_adapter
+ )
+ elif compile_state.compound_eager_adapter:
+ decorator = compile_state.compound_eager_adapter
+ else:
+ decorator = path.get(
+ compile_state.attributes, "eager_row_processor"
+ )
+ if decorator is None:
+ return False
+
+ if self.mapper._result_has_identity_key(result, decorator):
+ return decorator
+ else:
+ # no identity key - don't return a row
+ # processor, will cause a degrade to lazy
+ return False
+
+ def create_row_processor(
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ ):
+ if not self.parent.class_manager[self.key].impl.supports_population:
+ raise sa_exc.InvalidRequestError(
+ "'%s' does not support object "
+ "population - eager loading cannot be applied." % self
+ )
+
+ if self.uselist:
+ context.loaders_require_uniquing = True
+
+ our_path = path[self.parent_property]
+
+ eager_adapter = self._create_eager_adapter(
+ context, result, adapter, our_path, loadopt
+ )
+
+ if eager_adapter is not False:
+ key = self.key
+
+ _instance = loading._instance_processor(
+ query_entity,
+ self.mapper,
+ context,
+ result,
+ our_path[self.entity],
+ eager_adapter,
+ )
+
+ if not self.uselist:
+ self._create_scalar_loader(context, key, _instance, populators)
+ else:
+ self._create_collection_loader(
+ context, key, _instance, populators
+ )
+ else:
+ self.parent_property._get_strategy(
+ (("lazy", "select"),)
+ ).create_row_processor(
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ )
+
+ def _create_collection_loader(self, context, key, _instance, populators):
+ def load_collection_from_joined_new_row(state, dict_, row):
+ # note this must unconditionally clear out any existing collection.
+ # an existing collection would be present only in the case of
+ # populate_existing().
+ collection = attributes.init_state_collection(state, dict_, key)
+ result_list = util.UniqueAppender(
+ collection, "append_without_event"
+ )
+ context.attributes[(state, key)] = result_list
+ inst = _instance(row)
+ if inst is not None:
+ result_list.append(inst)
+
+ def load_collection_from_joined_existing_row(state, dict_, row):
+ if (state, key) in context.attributes:
+ result_list = context.attributes[(state, key)]
+ else:
+                # the (state, key) appender can be absent from
+                # context.attributes with isnew=False when self-referential
+                # eager loading is used; the same instance may be present
+                # in two distinct sets of result columns
+ collection = attributes.init_state_collection(
+ state, dict_, key
+ )
+ result_list = util.UniqueAppender(
+ collection, "append_without_event"
+ )
+ context.attributes[(state, key)] = result_list
+ inst = _instance(row)
+ if inst is not None:
+ result_list.append(inst)
+
+ def load_collection_from_joined_exec(state, dict_, row):
+ _instance(row)
+
+ populators["new"].append(
+ (self.key, load_collection_from_joined_new_row)
+ )
+ populators["existing"].append(
+ (self.key, load_collection_from_joined_existing_row)
+ )
+ if context.invoke_all_eagers:
+ populators["eager"].append(
+ (self.key, load_collection_from_joined_exec)
+ )
+
+ def _create_scalar_loader(self, context, key, _instance, populators):
+ def load_scalar_from_joined_new_row(state, dict_, row):
+ # set a scalar object instance directly on the parent
+ # object, bypassing InstrumentedAttribute event handlers.
+ dict_[key] = _instance(row)
+
+ def load_scalar_from_joined_existing_row(state, dict_, row):
+ # call _instance on the row, even though the object has
+ # been created, so that we further descend into properties
+ existing = _instance(row)
+
+ # conflicting value already loaded, this shouldn't happen
+ if key in dict_:
+ if existing is not dict_[key]:
+ util.warn(
+ "Multiple rows returned with "
+ "uselist=False for eagerly-loaded attribute '%s' "
+ % self
+ )
+ else:
+                # this case arises when one row has multiple loads of
+                # the same entity (e.g. via aliasing), and one of them
+                # has an attribute that the other doesn't.
+ dict_[key] = existing
+
+ def load_scalar_from_joined_exec(state, dict_, row):
+ _instance(row)
+
+ populators["new"].append((self.key, load_scalar_from_joined_new_row))
+ populators["existing"].append(
+ (self.key, load_scalar_from_joined_existing_row)
+ )
+ if context.invoke_all_eagers:
+ populators["eager"].append(
+ (self.key, load_scalar_from_joined_exec)
+ )
+
+
+@log.class_logger
+@relationships.RelationshipProperty.strategy_for(lazy="selectin")
+class SelectInLoader(PostLoader, util.MemoizedSlots):
+ __slots__ = (
+ "join_depth",
+ "omit_join",
+ "_parent_alias",
+ "_query_info",
+ "_fallback_query_info",
+ )
+
+ query_info = collections.namedtuple(
+ "queryinfo",
+ [
+ "load_only_child",
+ "load_with_join",
+ "in_expr",
+ "pk_cols",
+ "zero_idx",
+ "child_lookup_cols",
+ ],
+ )
+
+ _chunksize = 500
+
+ def __init__(self, parent, strategy_key):
+ super(SelectInLoader, self).__init__(parent, strategy_key)
+ self.join_depth = self.parent_property.join_depth
+ is_m2o = self.parent_property.direction is interfaces.MANYTOONE
+
+ if self.parent_property.omit_join is not None:
+ self.omit_join = self.parent_property.omit_join
+ else:
+ lazyloader = self.parent_property._get_strategy(
+ (("lazy", "select"),)
+ )
+ if is_m2o:
+ self.omit_join = lazyloader.use_get
+ else:
+ self.omit_join = self.parent._get_clause[0].compare(
+ lazyloader._rev_lazywhere,
+ use_proxies=True,
+ compare_keys=False,
+ equivalents=self.parent._equivalent_columns,
+ )
+
+ if self.omit_join:
+ if is_m2o:
+ self._query_info = self._init_for_omit_join_m2o()
+ self._fallback_query_info = self._init_for_join()
+ else:
+ self._query_info = self._init_for_omit_join()
+ else:
+ self._query_info = self._init_for_join()
+
+ def _init_for_omit_join(self):
+ pk_to_fk = dict(
+ self.parent_property._join_condition.local_remote_pairs
+ )
+ pk_to_fk.update(
+ (equiv, pk_to_fk[k])
+ for k in list(pk_to_fk)
+ for equiv in self.parent._equivalent_columns.get(k, ())
+ )
+
+ pk_cols = fk_cols = [
+ pk_to_fk[col] for col in self.parent.primary_key if col in pk_to_fk
+ ]
+ if len(fk_cols) > 1:
+ in_expr = sql.tuple_(*fk_cols)
+ zero_idx = False
+ else:
+ in_expr = fk_cols[0]
+ zero_idx = True
+
+ return self.query_info(False, False, in_expr, pk_cols, zero_idx, None)
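+
+    # illustrative sketch under an assumed one-to-many mapping, not part
+    # of the original source: for Parent.children where Child.parent_id
+    # references Parent.id, pk_cols == [child.parent_id] and the loader
+    # emits approximately
+    #
+    #     SELECT child.parent_id, child.* FROM child
+    #     WHERE child.parent_id IN (:primary_keys)
+    #
+    # keyed directly off the parent primary key values, with no JOIN.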
+
+ def _init_for_omit_join_m2o(self):
+ pk_cols = self.mapper.primary_key
+ if len(pk_cols) > 1:
+ in_expr = sql.tuple_(*pk_cols)
+ zero_idx = False
+ else:
+ in_expr = pk_cols[0]
+ zero_idx = True
+
+ lazyloader = self.parent_property._get_strategy((("lazy", "select"),))
+ lookup_cols = [lazyloader._equated_columns[pk] for pk in pk_cols]
+
+ return self.query_info(
+ True, False, in_expr, pk_cols, zero_idx, lookup_cols
+ )
+
+ def _init_for_join(self):
+ self._parent_alias = aliased(self.parent.class_)
+ pa_insp = inspect(self._parent_alias)
+ pk_cols = [
+ pa_insp._adapt_element(col) for col in self.parent.primary_key
+ ]
+ if len(pk_cols) > 1:
+ in_expr = sql.tuple_(*pk_cols)
+ zero_idx = False
+ else:
+ in_expr = pk_cols[0]
+ zero_idx = True
+ return self.query_info(False, True, in_expr, pk_cols, zero_idx, None)
+
+ def init_class_attribute(self, mapper):
+ self.parent_property._get_strategy(
+ (("lazy", "select"),)
+ ).init_class_attribute(mapper)
+
+ def create_row_processor(
+ self,
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ ):
+
+ if context.refresh_state:
+ return self._immediateload_create_row_processor(
+ context,
+ query_entity,
+ path,
+ loadopt,
+ mapper,
+ result,
+ adapter,
+ populators,
+ )
+ elif self._check_recursive_postload(context, path, self.join_depth):
+ return
+
+ if not self.parent.class_manager[self.key].impl.supports_population:
+ raise sa_exc.InvalidRequestError(
+ "'%s' does not support object "
+ "population - eager loading cannot be applied." % self
+ )
+
+ # a little dance here as the "path" is still something that only
+ # semi-tracks the exact series of things we are loading, still not
+ # telling us about with_polymorphic() and stuff like that when it's at
+ # the root.. the initial MapperEntity is more accurate for this case.
+ if len(path) == 1:
+ if not orm_util._entity_isa(query_entity.entity_zero, self.parent):
+ return
+ elif not orm_util._entity_isa(path[-1], self.parent):
+ return
+
+ selectin_path = (
+ context.compile_state.current_path or orm_util.PathRegistry.root
+ ) + path
+
+ path_w_prop = path[self.parent_property]
+
+        # build up a path from the leftmost
+        # entity to the thing we're eagerly loading.
+ with_poly_entity = path_w_prop.get(
+ context.attributes, "path_with_polymorphic", None
+ )
+ if with_poly_entity is not None:
+ effective_entity = inspect(with_poly_entity)
+ else:
+ effective_entity = self.entity
+
+ loading.PostLoad.callable_for_path(
+ context,
+ selectin_path,
+ self.parent,
+ self.parent_property,
+ self._load_for_path,
+ effective_entity,
+ loadopt,
+ )
+
+ def _load_for_path(
+ self, context, path, states, load_only, effective_entity, loadopt
+ ):
+ if load_only and self.key not in load_only:
+ return
+
+ query_info = self._query_info
+
+ if query_info.load_only_child:
+ our_states = collections.defaultdict(list)
+ none_states = []
+
+ mapper = self.parent
+
+ for state, overwrite in states:
+ state_dict = state.dict
+ related_ident = tuple(
+ mapper._get_state_attr_by_column(
+ state,
+ state_dict,
+ lk,
+ passive=attributes.PASSIVE_NO_FETCH,
+ )
+ for lk in query_info.child_lookup_cols
+ )
+ # if the loaded parent objects do not have the foreign key
+ # to the related item loaded, then degrade into the joined
+ # version of selectinload
+ if attributes.PASSIVE_NO_RESULT in related_ident:
+ query_info = self._fallback_query_info
+ break
+
+ # organize states into lists keyed to particular foreign
+ # key values.
+ if None not in related_ident:
+ our_states[related_ident].append(
+ (state, state_dict, overwrite)
+ )
+ else:
+ # For FK values that have None, add them to a
+ # separate collection that will be populated separately
+ none_states.append((state, state_dict, overwrite))
+
+ # note the above conditional may have changed query_info
+ if not query_info.load_only_child:
+ our_states = [
+ (state.key[1], state, state.dict, overwrite)
+ for state, overwrite in states
+ ]
+
+ pk_cols = query_info.pk_cols
+ in_expr = query_info.in_expr
+
+ if not query_info.load_with_join:
+ # in "omit join" mode, the primary key column and the
+ # "in" expression are in terms of the related entity. So
+ # if the related entity is polymorphic or otherwise aliased,
+ # we need to adapt our "pk_cols" and "in_expr" to that
+ # entity. in non-"omit join" mode, these are against the
+            # parent entity and do not need adaptation.
+ if effective_entity.is_aliased_class:
+ pk_cols = [
+ effective_entity._adapt_element(col) for col in pk_cols
+ ]
+ in_expr = effective_entity._adapt_element(in_expr)
+
+ bundle_ent = orm_util.Bundle("pk", *pk_cols)
+ bundle_sql = bundle_ent.__clause_element__()
+
+ entity_sql = effective_entity.__clause_element__()
+ q = Select._create_raw_select(
+ _raw_columns=[bundle_sql, entity_sql],
+ _label_style=LABEL_STYLE_TABLENAME_PLUS_COL,
+ _compile_options=ORMCompileState.default_compile_options,
+ _propagate_attrs={
+ "compile_state_plugin": "orm",
+ "plugin_subject": effective_entity,
+ },
+ )
+
+ if not query_info.load_with_join:
+ # the Bundle we have in the "omit_join" case is against raw, non
+ # annotated columns, so to ensure the Query knows its primary
+ # entity, we add it explicitly. If we made the Bundle against
+ # annotated columns, we hit a performance issue in this specific
+ # case, which is detailed in issue #4347.
+ q = q.select_from(effective_entity)
+ else:
+ # in the non-omit_join case, the Bundle is against the annotated/
+ # mapped column of the parent entity, but the #4347 issue does not
+ # occur in this case.
+ q = q.select_from(self._parent_alias).join(
+ getattr(self._parent_alias, self.parent_property.key).of_type(
+ effective_entity
+ )
+ )
+
+ q = q.filter(in_expr.in_(sql.bindparam("primary_keys")))
+
+ # a test which exercises what these comments talk about is
+ # test_selectin_relations.py -> test_twolevel_selectin_w_polymorphic
+ #
+ # effective_entity above is given to us in terms of the cached
+ # statement, namely this one:
+ orig_query = context.compile_state.select_statement
+
+ # the actual statement that was requested is this one:
+ # context_query = context.query
+ #
+ # that's not the cached one, however. So while it is of the identical
+ # structure, if it has entities like AliasedInsp, which we get from
+ # aliased() or with_polymorphic(), the AliasedInsp will likely be a
+ # different object identity each time, and will not match up
+ # hashing-wise to the corresponding AliasedInsp that's in the
+ # cached query, meaning it won't match on paths and loader lookups
+ # and loaders like this one will be skipped if it is used in options.
+ #
+ # Now we want to transfer loader options from the parent query to the
+ # "selectinload" query we're about to run. Which query do we transfer
+ # the options from? We use the cached query, because the options in
+ # that query will be in terms of the effective entity we were just
+ # handed.
+ #
+ # But now the selectinload query we are running is *also*
+ # cached. What if it's cached and running from some previous iteration
+ # of that AliasedInsp? Well in that case it will also use the previous
+ # iteration of the loader options. If the query expires and
+ # gets generated again, it will be handed the current effective_entity
+ # and the current _with_options, again in terms of whatever
+ # compile_state.select_statement happens to be right now, so the
+ # query will still be internally consistent and loader callables
+ # will be correctly invoked.
+
+ effective_path = path[self.parent_property]
+
+ if orig_query is context.query:
+ options = new_options = orig_query._with_options
+ user_defined_options = []
+ else:
+ options = orig_query._with_options
+
+ # propagate compile state options from the original query,
+ # updating their "extra_criteria" as necessary.
+            # note this will create a different cache key than the
+            # "orig" options if extra_criteria is present, because the
+            # copy of extra_criteria will have different bound parameters
+            # than those of the QueryableAttribute in the path
+
+ new_options = [
+ orig_opt._adjust_for_extra_criteria(context)
+ if orig_opt._is_strategy_option
+ else orig_opt
+ for orig_opt in options
+ if orig_opt._is_compile_state or orig_opt._is_legacy_option
+ ]
+
+ # propagate user defined options from the current query
+ user_defined_options = [
+ opt
+ for opt in context.query._with_options
+ if not opt._is_compile_state and not opt._is_legacy_option
+ ]
+
+ if loadopt and loadopt._extra_criteria:
+ new_options += (
+ orm_util.LoaderCriteriaOption(
+ effective_entity,
+ loadopt._generate_extra_criteria(context),
+ ),
+ )
+
+ q = q.options(*new_options)._update_compile_options(
+ {"_current_path": effective_path}
+ )
+ if user_defined_options:
+ q = q.options(*user_defined_options)
+
+ if context.populate_existing:
+ q = q.execution_options(populate_existing=True)
+
+ if self.parent_property.order_by:
+ if not query_info.load_with_join:
+ eager_order_by = self.parent_property.order_by
+ if effective_entity.is_aliased_class:
+ eager_order_by = [
+ effective_entity._adapt_element(elem)
+ for elem in eager_order_by
+ ]
+ q = q.order_by(*eager_order_by)
+ else:
+
+ def _setup_outermost_orderby(compile_context):
+ compile_context.eager_order_by += tuple(
+ util.to_list(self.parent_property.order_by)
+ )
+
+ q = q._add_context_option(
+ _setup_outermost_orderby, self.parent_property
+ )
+
+ if query_info.load_only_child:
+ self._load_via_child(
+ our_states, none_states, query_info, q, context
+ )
+ else:
+ self._load_via_parent(our_states, query_info, q, context)
+
+ def _load_via_child(self, our_states, none_states, query_info, q, context):
+ uselist = self.uselist
+
+ # this sort is really for the benefit of the unit tests
+ our_keys = sorted(our_states)
+ while our_keys:
+ chunk = our_keys[0 : self._chunksize]
+ our_keys = our_keys[self._chunksize :]
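+            # e.g., an illustrative note not from the original source:
+            # with the default _chunksize of 500 and 1200 parent keys,
+            # three IN-clause SELECTs of 500, 500 and 200 keys are emitted.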
+ data = {
+ k: v
+ for k, v in context.session.execute(
+ q,
+ params={
+ "primary_keys": [
+ key[0] if query_info.zero_idx else key
+ for key in chunk
+ ]
+ },
+ ).unique()
+ }
+
+ for key in chunk:
+ # for a real foreign key and no concurrent changes to the
+ # DB while running this method, "key" is always present in
+ # data. However, for primaryjoins without real foreign keys
+                # a non-None primaryjoin condition may still match no
+                # related object.
+ related_obj = data.get(key, None)
+ for state, dict_, overwrite in our_states[key]:
+ if not overwrite and self.key in dict_:
+ continue
+
+ state.get_impl(self.key).set_committed_value(
+ state,
+ dict_,
+ related_obj if not uselist else [related_obj],
+ )
+ # populate none states with empty value / collection
+ for state, dict_, overwrite in none_states:
+ if not overwrite and self.key in dict_:
+ continue
+
+ # note it's OK if this is a uselist=True attribute, the empty
+ # collection will be populated
+ state.get_impl(self.key).set_committed_value(state, dict_, None)
+
+ def _load_via_parent(self, our_states, query_info, q, context):
+ uselist = self.uselist
+ _empty_result = () if uselist else None
+
+ while our_states:
+ chunk = our_states[0 : self._chunksize]
+ our_states = our_states[self._chunksize :]
+
+ primary_keys = [
+ key[0] if query_info.zero_idx else key
+ for key, state, state_dict, overwrite in chunk
+ ]
+
+ data = collections.defaultdict(list)
+ for k, v in itertools.groupby(
+ context.session.execute(
+ q, params={"primary_keys": primary_keys}
+ ).unique(),
+ lambda x: x[0],
+ ):
+ data[k].extend(vv[1] for vv in v)
+
+ for key, state, state_dict, overwrite in chunk:
+
+ if not overwrite and self.key in state_dict:
+ continue
+
+ collection = data.get(key, _empty_result)
+
+ if not uselist and collection:
+ if len(collection) > 1:
+ util.warn(
+ "Multiple rows returned with "
+ "uselist=False for eagerly-loaded "
+ "attribute '%s' " % self
+ )
+ state.get_impl(self.key).set_committed_value(
+ state, state_dict, collection[0]
+ )
+ else:
+ # note that empty tuple set on uselist=False sets the
+ # value to None
+ state.get_impl(self.key).set_committed_value(
+ state, state_dict, collection
+ )
+
+
+def single_parent_validator(desc, prop):
+ def _do_check(state, value, oldvalue, initiator):
+ if value is not None and initiator.key == prop.key:
+ hasparent = initiator.hasparent(attributes.instance_state(value))
+ if hasparent and oldvalue is not value:
+ raise sa_exc.InvalidRequestError(
+ "Instance %s is already associated with an instance "
+ "of %s via its %s attribute, and is only allowed a "
+ "single parent."
+ % (orm_util.instance_str(value), state.class_, prop),
+ code="bbf1",
+ )
+ return value
+
+ def append(state, value, initiator):
+ return _do_check(state, value, None, initiator)
+
+ def set_(state, value, oldvalue, initiator):
+ return _do_check(state, value, oldvalue, initiator)
+
+ event.listen(
+ desc, "append", append, raw=True, retval=True, active_history=True
+ )
+ event.listen(desc, "set", set_, raw=True, retval=True, active_history=True)
diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py
new file mode 100644
index 0000000..c3dd5df
--- /dev/null
+++ b/lib/sqlalchemy/orm/strategy_options.py
@@ -0,0 +1,2008 @@
+# orm/strategy_options.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""
+
+"""
+
+from . import util as orm_util
+from .attributes import QueryableAttribute
+from .base import _class_to_mapper
+from .base import _is_aliased_class
+from .base import _is_mapped_class
+from .base import InspectionAttr
+from .interfaces import LoaderOption
+from .interfaces import MapperProperty
+from .interfaces import PropComparator
+from .path_registry import _DEFAULT_TOKEN
+from .path_registry import _WILDCARD_TOKEN
+from .path_registry import PathRegistry
+from .path_registry import TokenRegistry
+from .util import _orm_full_deannotate
+from .. import exc as sa_exc
+from .. import inspect
+from .. import util
+from ..sql import and_
+from ..sql import coercions
+from ..sql import roles
+from ..sql import traversals
+from ..sql import visitors
+from ..sql.base import _generative
+from ..sql.base import Generative
+
+
+class Load(Generative, LoaderOption):
+ """Represents loader options which modify the state of a
+ :class:`_query.Query` in order to affect how various mapped attributes are
+ loaded.
+
+ The :class:`_orm.Load` object is in most cases used implicitly behind the
+ scenes when one makes use of a query option like :func:`_orm.joinedload`,
+ :func:`.defer`, or similar. However, the :class:`_orm.Load` object
+ can also be used directly, and in some cases can be useful.
+
+ To use :class:`_orm.Load` directly, instantiate it with the target mapped
+ class as the argument. This style of usage is
+ useful when dealing with a :class:`_query.Query`
+ that has multiple entities::
+
+ myopt = Load(MyClass).joinedload("widgets")
+
+ The above ``myopt`` can now be used with :meth:`_query.Query.options`,
+ where it
+ will only take effect for the ``MyClass`` entity::
+
+ session.query(MyClass, MyOtherClass).options(myopt)
+
+ One case where :class:`_orm.Load`
+ is useful as public API is when specifying
+ "wildcard" options that only take effect for a certain class::
+
+ session.query(Order).options(Load(Order).lazyload('*'))
+
+ Above, all relationships on ``Order`` will be lazy-loaded, but other
+ attributes on those descendant objects will load using their normal
+ loader strategy.
+
+ .. seealso::
+
+ :ref:`deferred_options`
+
+ :ref:`deferred_loading_w_multiple`
+
+ :ref:`relationship_loader_options`
+
+ """
+
+ _is_strategy_option = True
+
+ _cache_key_traversal = [
+ ("path", visitors.ExtendedInternalTraversal.dp_has_cache_key),
+ ("strategy", visitors.ExtendedInternalTraversal.dp_plain_obj),
+ ("_of_type", visitors.ExtendedInternalTraversal.dp_multi),
+ ("_extra_criteria", visitors.InternalTraversal.dp_clauseelement_list),
+ (
+ "_context_cache_key",
+ visitors.ExtendedInternalTraversal.dp_has_cache_key_tuples,
+ ),
+ (
+ "local_opts",
+ visitors.ExtendedInternalTraversal.dp_string_multi_dict,
+ ),
+ ]
+
+ def __init__(self, entity):
+ insp = inspect(entity)
+ insp._post_inspect
+
+ self.path = insp._path_registry
+ # note that this .context is shared among all descendant
+ # Load objects
+ self.context = util.OrderedDict()
+ self.local_opts = {}
+ self.is_class_strategy = False
+
+ @classmethod
+ def for_existing_path(cls, path):
+ load = cls.__new__(cls)
+ load.path = path
+ load.context = {}
+ load.local_opts = {}
+ load._of_type = None
+ load._extra_criteria = ()
+ return load
+
+ def _generate_extra_criteria(self, context):
+ """Apply the current bound parameters in a QueryContext to the
+ immediate "extra_criteria" stored with this Load object.
+
+ Load objects are typically pulled from the cached version of
+ the statement from a QueryContext. The statement currently being
+ executed will have new values (and keys) for bound parameters in the
+ extra criteria which need to be applied by loader strategies when
+ they handle this criteria for a result set.
+
+ """
+
+ assert (
+ self._extra_criteria
+ ), "this should only be called if _extra_criteria is present"
+
+ orig_query = context.compile_state.select_statement
+ current_query = context.query
+
+ # NOTE: while it seems like we should not do the "apply" operation
+ # here if orig_query is current_query, skipping it in the "optimized"
+ # case causes the query to be different from a cache key perspective,
+ # because we are creating a copy of the criteria which is no longer
+ # the same identity of the _extra_criteria in the loader option
+ # itself. cache key logic produces a different key for
+ # (A, copy_of_A) vs. (A, A), because in the latter case it shortens
+        # the second part of the key to just indicate the identity.
+
+ # if orig_query is current_query:
+ # not cached yet. just do the and_()
+ # return and_(*self._extra_criteria)
+
+ k1 = orig_query._generate_cache_key()
+ k2 = current_query._generate_cache_key()
+
+ return k2._apply_params_to_element(k1, and_(*self._extra_criteria))
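+
+    # e.g., an illustrative note not from the original source: for an
+    # option such as selectinload(A.bs.and_(B.x > some_value)), the
+    # criteria pulled from the cached statement still carries the
+    # bound-parameter values of its first invocation; the cache-key
+    # comparison above rewrites those parameters to the values of the
+    # statement currently being executed.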
+
+ def _adjust_for_extra_criteria(self, context):
+ """Apply the current bound parameters in a QueryContext to all
+ occurrences "extra_criteria" stored within al this Load object;
+ copying in place.
+
+ """
+ orig_query = context.compile_state.select_statement
+
+ applied = {}
+
+ ck = [None, None]
+
+ def process(opt):
+ if not opt._extra_criteria:
+ return
+
+ if ck[0] is None:
+ ck[:] = (
+ orig_query._generate_cache_key(),
+ context.query._generate_cache_key(),
+ )
+ k1, k2 = ck
+
+ opt._extra_criteria = tuple(
+ k2._apply_params_to_element(k1, crit)
+ for crit in opt._extra_criteria
+ )
+
+ return self._deep_clone(applied, process)
+
+ def _deep_clone(self, applied, process):
+ if self in applied:
+ return applied[self]
+
+ cloned = self._generate()
+
+ applied[self] = cloned
+
+ cloned.strategy = self.strategy
+
+ assert cloned.propagate_to_loaders == self.propagate_to_loaders
+ assert cloned.is_class_strategy == self.is_class_strategy
+ assert cloned.is_opts_only == self.is_opts_only
+
+ if self.context:
+ cloned.context = util.OrderedDict(
+ [
+ (
+ key,
+ value._deep_clone(applied, process)
+ if isinstance(value, Load)
+ else value,
+ )
+ for key, value in self.context.items()
+ ]
+ )
+
+ cloned.local_opts.update(self.local_opts)
+
+ process(cloned)
+
+ return cloned
+
+ @property
+ def _context_cache_key(self):
+ serialized = []
+ if self.context is None:
+ return []
+ for (key, loader_path), obj in self.context.items():
+ if key != "loader":
+ continue
+ serialized.append(loader_path + (obj,))
+ return serialized
+
+ def _generate(self):
+ cloned = super(Load, self)._generate()
+ cloned.local_opts = {}
+ return cloned
+
+ is_opts_only = False
+ is_class_strategy = False
+ strategy = None
+ propagate_to_loaders = False
+ _of_type = None
+ _extra_criteria = ()
+
+ def process_compile_state_replaced_entities(
+ self, compile_state, mapper_entities
+ ):
+ if not compile_state.compile_options._enable_eagerloads:
+ return
+
+ # process is being run here so that the options given are validated
+ # against what the lead entities were, as well as to accommodate
+        # the entities having been replaced with equivalents
+ self._process(
+ compile_state,
+ mapper_entities,
+ not bool(compile_state.current_path),
+ )
+
+ def process_compile_state(self, compile_state):
+ if not compile_state.compile_options._enable_eagerloads:
+ return
+
+ self._process(
+ compile_state,
+ compile_state._lead_mapper_entities,
+ not bool(compile_state.current_path)
+ and not compile_state.compile_options._for_refresh_state,
+ )
+
+ def _process(self, compile_state, mapper_entities, raiseerr):
+ is_refresh = compile_state.compile_options._for_refresh_state
+ current_path = compile_state.current_path
+ if current_path:
+ for (token, start_path), loader in self.context.items():
+ if is_refresh and not loader.propagate_to_loaders:
+ continue
+ chopped_start_path = self._chop_path(start_path, current_path)
+ if chopped_start_path is not None:
+ compile_state.attributes[
+ (token, chopped_start_path)
+ ] = loader
+ else:
+ compile_state.attributes.update(self.context)
+
+ def _generate_path(
+ self,
+ path,
+ attr,
+ for_strategy,
+ wildcard_key,
+ raiseerr=True,
+ polymorphic_entity_context=None,
+ ):
+ existing_of_type = self._of_type
+ self._of_type = None
+ if raiseerr and not path.has_entity:
+ if isinstance(path, TokenRegistry):
+ raise sa_exc.ArgumentError(
+ "Wildcard token cannot be followed by another entity"
+ )
+ else:
+ raise sa_exc.ArgumentError(
+ "Mapped attribute '%s' does not "
+ "refer to a mapped entity" % (path.prop,)
+ )
+
+ if isinstance(attr, util.string_types):
+
+ default_token = attr.endswith(_DEFAULT_TOKEN)
+ attr_str_name = attr
+ if attr.endswith(_WILDCARD_TOKEN) or default_token:
+ if default_token:
+ self.propagate_to_loaders = False
+ if wildcard_key:
+ attr = "%s:%s" % (wildcard_key, attr)
+
+ # TODO: AliasedInsp inside the path for of_type is not
+ # working for a with_polymorphic entity because the
+ # relationship loaders don't render the with_poly into the
+ # path. See #4469 which will try to improve this
+ if existing_of_type and not existing_of_type.is_aliased_class:
+ path = path.parent[existing_of_type]
+ path = path.token(attr)
+ self.path = path
+ return path
+
+ if existing_of_type:
+ ent = inspect(existing_of_type)
+ else:
+ ent = path.entity
+
+ util.warn_deprecated_20(
+ "Using strings to indicate column or "
+ "relationship paths in loader options is deprecated "
+ "and will be removed in SQLAlchemy 2.0. Please use "
+ "the class-bound attribute directly.",
+ )
+ try:
+ # use getattr on the class to work around
+ # synonyms, hybrids, etc.
+ attr = getattr(ent.class_, attr)
+ except AttributeError as err:
+ if raiseerr:
+ util.raise_(
+ sa_exc.ArgumentError(
+ 'Can\'t find property named "%s" on '
+ "%s in this Query." % (attr, ent)
+ ),
+ replace_context=err,
+ )
+ else:
+ return None
+ else:
+ try:
+ attr = found_property = attr.property
+ except AttributeError as ae:
+ if not isinstance(attr, MapperProperty):
+ util.raise_(
+ sa_exc.ArgumentError(
+ 'Expected attribute "%s" on %s to be a '
+ "mapped attribute; "
+ "instead got %s object."
+ % (attr_str_name, ent, type(attr))
+ ),
+ replace_context=ae,
+ )
+ else:
+ raise
+
+ path = path[attr]
+ else:
+ insp = inspect(attr)
+
+ if insp.is_mapper or insp.is_aliased_class:
+ # TODO: this does not appear to be a valid codepath. "attr"
+ # would never be a mapper. This block is present in 1.2
+            # as well, however it does not seem to be exercised by any tests.
+ if not orm_util._entity_corresponds_to_use_path_impl(
+ attr.parent, path[-1]
+ ):
+ if raiseerr:
+ raise sa_exc.ArgumentError(
+ "Attribute '%s' does not "
+ "link from element '%s'" % (attr, path.entity)
+ )
+ else:
+ return None
+ elif insp.is_property:
+ prop = found_property = attr
+ path = path[prop]
+ elif insp.is_attribute:
+ prop = found_property = attr.property
+
+ if not orm_util._entity_corresponds_to_use_path_impl(
+ attr.parent, path[-1]
+ ):
+ if raiseerr:
+ raise sa_exc.ArgumentError(
+ 'Attribute "%s" does not '
+ 'link from element "%s".%s'
+ % (
+ attr,
+ path.entity,
+ (
+ " Did you mean to use "
+ "%s.of_type(%s)?"
+ % (path[-2], attr.class_.__name__)
+ if len(path) > 1
+ and path.entity.is_mapper
+ and attr.parent.is_aliased_class
+ else ""
+ ),
+ )
+ )
+ else:
+ return None
+
+ if attr._extra_criteria and not self._extra_criteria:
+ # in most cases, the process that brings us here will have
+ # already established _extra_criteria. however if not,
+ # and it's present on the attribute, then use that.
+ self._extra_criteria = attr._extra_criteria
+
+ if getattr(attr, "_of_type", None):
+ ac = attr._of_type
+ ext_info = of_type_info = inspect(ac)
+
+ if polymorphic_entity_context is None:
+ polymorphic_entity_context = self.context
+
+ existing = path.entity_path[prop].get(
+ polymorphic_entity_context, "path_with_polymorphic"
+ )
+
+ if not ext_info.is_aliased_class:
+ ac = orm_util.with_polymorphic(
+ ext_info.mapper.base_mapper,
+ ext_info.mapper,
+ aliased=True,
+ _use_mapper_path=True,
+ _existing_alias=inspect(existing)
+ if existing is not None
+ else None,
+ )
+
+ ext_info = inspect(ac)
+
+ path.entity_path[prop].set(
+ polymorphic_entity_context, "path_with_polymorphic", ac
+ )
+
+ path = path[prop][ext_info]
+
+ self._of_type = of_type_info
+
+ else:
+ path = path[prop]
+
+ if for_strategy is not None:
+ found_property._get_strategy(for_strategy)
+ if path.has_entity:
+ path = path.entity_path
+ self.path = path
+ return path
+
+ def __str__(self):
+ return "Load(strategy=%r)" % (self.strategy,)
+
+ def _coerce_strat(self, strategy):
+ if strategy is not None:
+ strategy = tuple(sorted(strategy.items()))
+ return strategy
+
+ def _apply_to_parent(self, parent, applied, bound):
+ raise NotImplementedError(
+ "Only 'unbound' loader options may be used with the "
+ "Load.options() method"
+ )
+
+ @_generative
+ def options(self, *opts):
+ r"""Apply a series of options as sub-options to this
+ :class:`_orm.Load`
+ object.
+
+ E.g.::
+
+ query = session.query(Author)
+ query = query.options(
+ joinedload(Author.book).options(
+ load_only(Book.summary, Book.excerpt),
+ joinedload(Book.citations).options(
+ joinedload(Citation.author)
+ )
+ )
+ )
+
+ :param \*opts: A series of loader option objects (ultimately
+ :class:`_orm.Load` objects) which should be applied to the path
+ specified by this :class:`_orm.Load` object.
+
+ .. versionadded:: 1.3.6
+
+ .. seealso::
+
+ :func:`.defaultload`
+
+ :ref:`relationship_loader_options`
+
+ :ref:`deferred_loading_w_multiple`
+
+ """
+ apply_cache = {}
+ bound = not isinstance(self, _UnboundLoad)
+ if bound:
+ raise NotImplementedError(
+ "The options() method is currently only supported "
+ "for 'unbound' loader options"
+ )
+ for opt in opts:
+ opt._apply_to_parent(self, apply_cache, bound)
+
+ @_generative
+ def set_relationship_strategy(
+ self, attr, strategy, propagate_to_loaders=True
+ ):
+ strategy = self._coerce_strat(strategy)
+ self.propagate_to_loaders = propagate_to_loaders
+ cloned = self._clone_for_bind_strategy(attr, strategy, "relationship")
+ self.path = cloned.path
+ self._of_type = cloned._of_type
+ self._extra_criteria = cloned._extra_criteria
+ cloned.is_class_strategy = self.is_class_strategy = False
+ self.propagate_to_loaders = cloned.propagate_to_loaders
+
+ @_generative
+ def set_column_strategy(self, attrs, strategy, opts=None, opts_only=False):
+ strategy = self._coerce_strat(strategy)
+ self.is_class_strategy = False
+ for attr in attrs:
+ cloned = self._clone_for_bind_strategy(
+ attr, strategy, "column", opts_only=opts_only, opts=opts
+ )
+ cloned.propagate_to_loaders = True
+
+ @_generative
+ def set_generic_strategy(self, attrs, strategy):
+ strategy = self._coerce_strat(strategy)
+ for attr in attrs:
+ cloned = self._clone_for_bind_strategy(attr, strategy, None)
+ cloned.propagate_to_loaders = True
+
+ @_generative
+ def set_class_strategy(self, strategy, opts):
+ strategy = self._coerce_strat(strategy)
+ cloned = self._clone_for_bind_strategy(None, strategy, None)
+ cloned.is_class_strategy = True
+ cloned.propagate_to_loaders = True
+ cloned.local_opts.update(opts)
+
+ def _clone_for_bind_strategy(
+ self, attr, strategy, wildcard_key, opts_only=False, opts=None
+ ):
+ """Create an anonymous clone of the Load/_UnboundLoad that is suitable
+ to be placed in the context / _to_bind collection of this Load
+ object. The clone will then lose references to context/_to_bind
+ in order to not create reference cycles.
+
+ """
+ cloned = self._generate()
+ cloned._generate_path(self.path, attr, strategy, wildcard_key)
+ cloned.strategy = strategy
+
+ cloned.local_opts = self.local_opts
+ if opts:
+ cloned.local_opts.update(opts)
+ if opts_only:
+ cloned.is_opts_only = True
+
+ if strategy or cloned.is_opts_only:
+ cloned._set_path_strategy()
+ return cloned
+
+ def _set_for_path(self, context, path, replace=True, merge_opts=False):
+ if merge_opts or not replace:
+ existing = path.get(context, "loader")
+ if existing:
+ if merge_opts:
+ existing.local_opts.update(self.local_opts)
+ existing._extra_criteria += self._extra_criteria
+ else:
+ path.set(context, "loader", self)
+ else:
+ existing = path.get(context, "loader")
+ path.set(context, "loader", self)
+ if existing and existing.is_opts_only:
+ self.local_opts.update(existing.local_opts)
+ existing._extra_criteria += self._extra_criteria
+
+ def _set_path_strategy(self):
+ if not self.is_class_strategy and self.path.has_entity:
+ effective_path = self.path.parent
+ else:
+ effective_path = self.path
+
+ if effective_path.is_token:
+ for path in effective_path.generate_for_superclasses():
+ self._set_for_path(
+ self.context,
+ path,
+ replace=True,
+ merge_opts=self.is_opts_only,
+ )
+ else:
+ self._set_for_path(
+ self.context,
+ effective_path,
+ replace=True,
+ merge_opts=self.is_opts_only,
+ )
+
+ # remove cycles; _set_path_strategy is always invoked on an
+ # anonymous clone of the Load / UnboundLoad object since #5056
+ self.context = None
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+
+ # can't pickle this right now; warning is raised by strategies
+ d["_extra_criteria"] = ()
+
+ if d["context"] is not None:
+ d["context"] = PathRegistry.serialize_context_dict(
+ d["context"], ("loader",)
+ )
+ d["path"] = self.path.serialize()
+ return d
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ self.path = PathRegistry.deserialize(self.path)
+ if self.context is not None:
+ self.context = PathRegistry.deserialize_context_dict(self.context)
+
+ def _chop_path(self, to_chop, path):
+ i = -1
+
+ for i, (c_token, p_token) in enumerate(zip(to_chop, path.path)):
+ if isinstance(c_token, util.string_types):
+ # TODO: this is approximated from the _UnboundLoad
+ # version and probably has issues, not fully covered.
+
+ if i == 0 and c_token.endswith(":" + _DEFAULT_TOKEN):
+ return to_chop
+ elif (
+ c_token != "relationship:%s" % (_WILDCARD_TOKEN,)
+ and c_token != p_token.key
+ ):
+ return None
+
+ if c_token is p_token:
+ continue
+ elif (
+ isinstance(c_token, InspectionAttr)
+ and c_token.is_mapper
+ and p_token.is_mapper
+ and c_token.isa(p_token)
+ ):
+ continue
+ else:
+ return None
+ return to_chop[i + 1 :]
+
+
+class _UnboundLoad(Load):
+ """Represent a loader option that isn't tied to a root entity.
+
+ The loader option will produce an entity-linked :class:`_orm.Load`
+ object when it is passed :meth:`_query.Query.options`.
+
+ This provides compatibility with the traditional system
+ of freestanding options, e.g. ``joinedload('x.y.z')``.
+
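+    For example, a string-based option such as the following (``User``
+    being a hypothetical mapped class, for illustration) begins as an
+    _UnboundLoad and is bound to an actual entity only when the query is
+    compiled::
+
+        session.query(User).options(joinedload("addresses"))
+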
+ """
+
+ def __init__(self):
+ self.path = ()
+ self._to_bind = []
+ self.local_opts = {}
+ self._extra_criteria = ()
+
+ def _gen_cache_key(self, anon_map, bindparams, _unbound_option_seen=None):
+ """Inlined gen_cache_key
+
+ Original traversal is::
+
+
+ _cache_key_traversal = [
+ ("path", visitors.ExtendedInternalTraversal.dp_multi_list),
+ ("strategy", visitors.ExtendedInternalTraversal.dp_plain_obj),
+ (
+ "_to_bind",
+ visitors.ExtendedInternalTraversal.dp_has_cache_key_list,
+ ),
+ (
+ "_extra_criteria",
+ visitors.InternalTraversal.dp_clauseelement_list),
+ (
+ "local_opts",
+ visitors.ExtendedInternalTraversal.dp_string_multi_dict,
+ ),
+ ]
+
+        The inlining is done so that the "_to_bind" list can be flattened,
+        rather than repeating the same UnboundLoad options over and over
+        again.
+
+ See #6869
+
+ """
+
+ idself = id(self)
+ cls = self.__class__
+
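+        # anon_map behaves like a defaultdict: indexing an id not yet
+        # present assigns and returns a new anonymized token, so the
+        # "else" branch below creates the token for this object as it
+        # retrieves it.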
+ if idself in anon_map:
+ return (anon_map[idself], cls)
+ else:
+ id_ = anon_map[idself]
+
+ vis = traversals._cache_key_traversal_visitor
+
+ seen = _unbound_option_seen
+ if seen is None:
+ seen = set()
+
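+        # note: the "elem not in seen and not seen.add(elem)" filter used
+        # in the generator below dedupes while iterating; set.add()
+        # returns None, so the second clause always passes and merely
+        # marks the element as seen.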
+ return (
+ (id_, cls)
+ + vis.visit_multi_list(
+ "path", self.path, self, anon_map, bindparams
+ )
+ + ("strategy", self.strategy)
+ + (
+ (
+ "_to_bind",
+ tuple(
+ elem._gen_cache_key(
+ anon_map, bindparams, _unbound_option_seen=seen
+ )
+ for elem in self._to_bind
+ if elem not in seen and not seen.add(elem)
+ ),
+ )
+ if self._to_bind
+ else ()
+ )
+ + (
+ (
+ "_extra_criteria",
+ tuple(
+ elem._gen_cache_key(anon_map, bindparams)
+ for elem in self._extra_criteria
+ ),
+ )
+ if self._extra_criteria
+ else ()
+ )
+ + (
+ vis.visit_string_multi_dict(
+ "local_opts", self.local_opts, self, anon_map, bindparams
+ )
+ if self.local_opts
+ else ()
+ )
+ )
+
+ _is_chain_link = False
+
+ def _set_path_strategy(self):
+ self._to_bind.append(self)
+
+ # remove cycles; _set_path_strategy is always invoked on an
+ # anonymous clone of the Load / UnboundLoad object since #5056
+ self._to_bind = None
+
+ def _deep_clone(self, applied, process):
+ if self in applied:
+ return applied[self]
+
+ cloned = self._generate()
+
+ applied[self] = cloned
+
+ cloned.strategy = self.strategy
+
+ assert cloned.propagate_to_loaders == self.propagate_to_loaders
+ assert cloned.is_class_strategy == self.is_class_strategy
+ assert cloned.is_opts_only == self.is_opts_only
+
+ cloned._to_bind = [
+ elem._deep_clone(applied, process) for elem in self._to_bind or ()
+ ]
+
+ cloned.local_opts.update(self.local_opts)
+
+ process(cloned)
+
+ return cloned
+
+ def _apply_to_parent(self, parent, applied, bound, to_bind=None):
+ if self in applied:
+ return applied[self]
+
+ if to_bind is None:
+ to_bind = self._to_bind
+
+ cloned = self._generate()
+
+ applied[self] = cloned
+
+ cloned.strategy = self.strategy
+ if self.path:
+ attr = self.path[-1]
+ if isinstance(attr, util.string_types) and attr.endswith(
+ _DEFAULT_TOKEN
+ ):
+ attr = attr.split(":")[0] + ":" + _WILDCARD_TOKEN
+ cloned._generate_path(
+ parent.path + self.path[0:-1], attr, self.strategy, None
+ )
+
+ # these assertions can go away once the "sub options" API is
+ # mature
+ assert cloned.propagate_to_loaders == self.propagate_to_loaders
+ assert cloned.is_class_strategy == self.is_class_strategy
+ assert cloned.is_opts_only == self.is_opts_only
+
+ uniq = set()
+
+ cloned._to_bind = parent._to_bind
+
+ cloned._to_bind[:] = [
+ elem
+ for elem in cloned._to_bind
+ if elem not in uniq and not uniq.add(elem)
+ ] + [
+ elem._apply_to_parent(parent, applied, bound, to_bind)
+ for elem in to_bind
+ if elem not in uniq and not uniq.add(elem)
+ ]
+
+ cloned.local_opts.update(self.local_opts)
+
+ return cloned
+
+ def _generate_path(self, path, attr, for_strategy, wildcard_key):
+ if (
+ wildcard_key
+ and isinstance(attr, util.string_types)
+ and attr in (_WILDCARD_TOKEN, _DEFAULT_TOKEN)
+ ):
+ if attr == _DEFAULT_TOKEN:
+ self.propagate_to_loaders = False
+ attr = "%s:%s" % (wildcard_key, attr)
+ if path and _is_mapped_class(path[-1]) and not self.is_class_strategy:
+ path = path[0:-1]
+ if attr:
+ path = path + (attr,)
+ self.path = path
+ self._extra_criteria = getattr(attr, "_extra_criteria", ())
+
+ return path
+
+ def __getstate__(self):
+ d = self.__dict__.copy()
+
+ # can't pickle this right now; warning is raised by strategies
+ d["_extra_criteria"] = ()
+
+ d["path"] = self._serialize_path(self.path, filter_aliased_class=True)
+ return d
+
+ def __setstate__(self, state):
+ ret = []
+ for key in state["path"]:
+ if isinstance(key, tuple):
+ if len(key) == 2:
+ # support legacy
+ cls, propkey = key
+ of_type = None
+ else:
+ cls, propkey, of_type = key
+ prop = getattr(cls, propkey)
+ if of_type:
+ prop = prop.of_type(of_type)
+ ret.append(prop)
+ else:
+ ret.append(key)
+ state["path"] = tuple(ret)
+ self.__dict__ = state
+
+ def _process(self, compile_state, mapper_entities, raiseerr):
+ dedupes = compile_state.attributes["_unbound_load_dedupes"]
+ is_refresh = compile_state.compile_options._for_refresh_state
+ for val in self._to_bind:
+ if val not in dedupes:
+ dedupes.add(val)
+ if is_refresh and not val.propagate_to_loaders:
+ continue
+ val._bind_loader(
+ [ent.entity_zero for ent in mapper_entities],
+ compile_state.current_path,
+ compile_state.attributes,
+ raiseerr,
+ )
+
+ @classmethod
+ def _from_keys(cls, meth, keys, chained, kw):
+ opt = _UnboundLoad()
+
+ def _split_key(key):
+ if isinstance(key, util.string_types):
+ # coerce fooload('*') into "default loader strategy"
+ if key == _WILDCARD_TOKEN:
+ return (_DEFAULT_TOKEN,)
+ # coerce fooload(".*") into "wildcard on default entity"
+ elif key.startswith("." + _WILDCARD_TOKEN):
+ util.warn_deprecated(
+ "The undocumented `.{WILDCARD}` format is deprecated "
+ "and will be removed in a future version as it is "
+ "believed to be unused. "
+ "If you have been using this functionality, please "
+ "comment on Issue #4390 on the SQLAlchemy project "
+ "tracker.",
+ version="1.4",
+ )
+ key = key[1:]
+ return key.split(".")
+ else:
+ return (key,)
+
+ all_tokens = [token for key in keys for token in _split_key(key)]
+
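+        # e.g. joinedload("a.b.c") arrives here with chained=False and is
+        # built up as defaultload("a").defaultload("b").joinedload("c"),
+        # whereas contains_eager("a.b.c") passes chained=True and applies
+        # its strategy at every link in the chain.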
+ for token in all_tokens[0:-1]:
+ # set _is_chain_link first so that clones of the
+ # object also inherit this flag
+ opt._is_chain_link = True
+ if chained:
+ opt = meth(opt, token, **kw)
+ else:
+ opt = opt.defaultload(token)
+
+ opt = meth(opt, all_tokens[-1], **kw)
+ opt._is_chain_link = False
+ return opt
+
+ def _chop_path(self, to_chop, path):
+ i = -1
+ for i, (c_token, (p_entity, p_prop)) in enumerate(
+ zip(to_chop, path.pairs())
+ ):
+ if isinstance(c_token, util.string_types):
+ if i == 0 and c_token.endswith(":" + _DEFAULT_TOKEN):
+ return to_chop
+ elif (
+ c_token != "relationship:%s" % (_WILDCARD_TOKEN,)
+ and c_token != p_prop.key
+ ):
+ return None
+ elif isinstance(c_token, PropComparator):
+ if c_token.property is not p_prop or (
+ c_token._parententity is not p_entity
+ and (
+ not c_token._parententity.is_mapper
+ or not c_token._parententity.isa(p_entity)
+ )
+ ):
+ return None
+ else:
+ i += 1
+
+ return to_chop[i:]
+
+ def _serialize_path(self, path, filter_aliased_class=False):
+ ret = []
+ for token in path:
+ if isinstance(token, QueryableAttribute):
+ if (
+ filter_aliased_class
+ and token._of_type
+ and inspect(token._of_type).is_aliased_class
+ ):
+ ret.append((token._parentmapper.class_, token.key, None))
+ else:
+ ret.append(
+ (
+ token._parentmapper.class_,
+ token.key,
+ token._of_type.entity if token._of_type else None,
+ )
+ )
+ elif isinstance(token, PropComparator):
+ ret.append((token._parentmapper.class_, token.key, None))
+ else:
+ ret.append(token)
+ return ret
+
+ def _bind_loader(self, entities, current_path, context, raiseerr):
+ """Convert from an _UnboundLoad() object into a Load() object.
+
+        The _UnboundLoad() uses an informal "path" and does not necessarily
+        refer to a lead entity, as it may use string tokens.  The Load(),
+        on the other hand, refers to a complete path.  This method
+        reconciles an option against the entities of a given Query to
+        produce a Load.
+
+ Example::
+
+
+ query = session.query(User).options(
+ joinedload("orders").joinedload("items"))
+
+ The above options will be an _UnboundLoad object along the lines
+ of (note this is not the exact API of _UnboundLoad)::
+
+ _UnboundLoad(
+ _to_bind=[
+ _UnboundLoad(["orders"], {"lazy": "joined"}),
+ _UnboundLoad(["orders", "items"], {"lazy": "joined"}),
+ ]
+ )
+
+ After this method, we get something more like this (again this is
+ not exact API)::
+
+ Load(
+ User,
+ (User, User.orders.property))
+ Load(
+ User,
+ (User, User.orders.property, Order, Order.items.property))
+
+ """
+
+ start_path = self.path
+
+ if self.is_class_strategy and current_path:
+ start_path += (entities[0],)
+
+ # _current_path implies we're in a
+ # secondary load with an existing path
+
+ if current_path:
+ start_path = self._chop_path(start_path, current_path)
+
+ if not start_path:
+ return None
+
+        # look at the first token and try to locate within the Query
+        # which entity we are referring to.
+ token = start_path[0]
+
+ if isinstance(token, util.string_types):
+ entity = self._find_entity_basestring(entities, token, raiseerr)
+ elif isinstance(token, PropComparator):
+ prop = token.property
+ entity = self._find_entity_prop_comparator(
+ entities, prop, token._parententity, raiseerr
+ )
+ elif self.is_class_strategy and _is_mapped_class(token):
+ entity = inspect(token)
+ if entity not in entities:
+ entity = None
+ else:
+ raise sa_exc.ArgumentError(
+ "mapper option expects " "string key or list of attributes"
+ )
+
+ if not entity:
+ return
+
+ path_element = entity
+
+ # transfer our entity-less state into a Load() object
+ # with a real entity path. Start with the lead entity
+ # we just located, then go through the rest of our path
+ # tokens and populate into the Load().
+ loader = Load(path_element)
+
+ if context is None:
+ context = loader.context
+
+ loader.strategy = self.strategy
+ loader.is_opts_only = self.is_opts_only
+ loader.is_class_strategy = self.is_class_strategy
+ loader._extra_criteria = self._extra_criteria
+
+ path = loader.path
+
+ if not loader.is_class_strategy:
+ for idx, token in enumerate(start_path):
+ if not loader._generate_path(
+ loader.path,
+ token,
+ self.strategy if idx == len(start_path) - 1 else None,
+ None,
+ raiseerr,
+ polymorphic_entity_context=context,
+ ):
+ return
+
+ loader.local_opts.update(self.local_opts)
+
+ if not loader.is_class_strategy and loader.path.has_entity:
+ effective_path = loader.path.parent
+ else:
+ effective_path = loader.path
+
+ # prioritize "first class" options over those
+ # that were "links in the chain", e.g. "x" and "y" in
+ # someload("x.y.z") versus someload("x") / someload("x.y")
+
+ if effective_path.is_token:
+ for path in effective_path.generate_for_superclasses():
+ loader._set_for_path(
+ context,
+ path,
+ replace=not self._is_chain_link,
+ merge_opts=self.is_opts_only,
+ )
+ else:
+ loader._set_for_path(
+ context,
+ effective_path,
+ replace=not self._is_chain_link,
+ merge_opts=self.is_opts_only,
+ )
+
+ return loader
+
+ def _find_entity_prop_comparator(self, entities, prop, mapper, raiseerr):
+ if _is_aliased_class(mapper):
+ searchfor = mapper
+ else:
+ searchfor = _class_to_mapper(mapper)
+ for ent in entities:
+ if orm_util._entity_corresponds_to(ent, searchfor):
+ return ent
+ else:
+ if raiseerr:
+ if not list(entities):
+ raise sa_exc.ArgumentError(
+ "Query has only expression-based entities, "
+ 'which do not apply to %s "%s"'
+ % (util.clsname_as_plain_name(type(prop)), prop)
+ )
+ else:
+ raise sa_exc.ArgumentError(
+ 'Mapped attribute "%s" does not apply to any of the '
+ "root entities in this query, e.g. %s. Please "
+ "specify the full path "
+ "from one of the root entities to the target "
+ "attribute. "
+ % (prop, ", ".join(str(x) for x in entities))
+ )
+ else:
+ return None
+
+ def _find_entity_basestring(self, entities, token, raiseerr):
+ if token.endswith(":" + _WILDCARD_TOKEN):
+ if len(list(entities)) != 1:
+ if raiseerr:
+ raise sa_exc.ArgumentError(
+ "Can't apply wildcard ('*') or load_only() "
+ "loader option to multiple entities %s. Specify "
+ "loader options for each entity individually, such "
+ "as %s."
+ % (
+ ", ".join(str(ent) for ent in entities),
+ ", ".join(
+ "Load(%s).some_option('*')" % ent
+ for ent in entities
+ ),
+ )
+ )
+ elif token.endswith(_DEFAULT_TOKEN):
+ raiseerr = False
+
+ for ent in entities:
+ # return only the first _MapperEntity when searching
+ # based on string prop name. Ideally object
+ # attributes are used to specify more exactly.
+ return ent
+ else:
+ if raiseerr:
+ raise sa_exc.ArgumentError(
+ "Query has only expression-based entities - "
+ 'can\'t find property named "%s".' % (token,)
+ )
+ else:
+ return None
+
+
+class loader_option(object):
+ def __init__(self):
+ pass
+
+ def __call__(self, fn):
+ self.name = name = fn.__name__
+ self.fn = fn
+ if hasattr(Load, name):
+ raise TypeError("Load class already has a %s method." % (name))
+ setattr(Load, name, fn)
+
+ return self
+
+ def _add_unbound_fn(self, fn):
+ self._unbound_fn = fn
+ fn_doc = self.fn.__doc__
+ self.fn.__doc__ = """Produce a new :class:`_orm.Load` object with the
+:func:`_orm.%(name)s` option applied.
+
+See :func:`_orm.%(name)s` for usage examples.
+
+""" % {
+ "name": self.name
+ }
+
+ fn.__doc__ = fn_doc
+ return self
+
+ def _add_unbound_all_fn(self, fn):
+ fn.__doc__ = """Produce a standalone "all" option for
+:func:`_orm.%(name)s`.
+
+.. deprecated:: 0.9
+
+ The :func:`_orm.%(name)s_all` function is deprecated, and will be removed
+ in a future release. Please use method chaining with
+ :func:`_orm.%(name)s` instead, as in::
+
+ session.query(MyClass).options(
+ %(name)s("someattribute").%(name)s("anotherattribute")
+ )
+
+""" % {
+ "name": self.name
+ }
+ fn = util.deprecated(
+        # This is used by `baked_lazyload_all`, which was only deprecated
+        # in version 1.2, so this must stick around until that is removed
+ "0.9",
+ "The :func:`.%(name)s_all` function is deprecated, and will be "
+ "removed in a future release. Please use method chaining with "
+ ":func:`.%(name)s` instead" % {"name": self.name},
+ add_deprecation_to_docstring=False,
+ )(fn)
+
+ self._unbound_all_fn = fn
+ return self
+
+
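+# Registration pattern for the options that follow: @loader_option()
+# installs the decorated function as a method on Load, while the
+# @<name>._add_unbound_fn variant beneath it supplies the implementation
+# used for the standalone, module-level form of the same option.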
+@loader_option()
+def contains_eager(loadopt, attr, alias=None):
+ r"""Indicate that the given attribute should be eagerly loaded from
+ columns stated manually in the query.
+
+ This function is part of the :class:`_orm.Load` interface and supports
+ both method-chained and standalone operation.
+
+ The option is used in conjunction with an explicit join that loads
+ the desired rows, i.e.::
+
+ sess.query(Order).\
+ join(Order.user).\
+ options(contains_eager(Order.user))
+
+ The above query would join from the ``Order`` entity to its related
+ ``User`` entity, and the returned ``Order`` objects would have the
+ ``Order.user`` attribute pre-populated.
+
+ It may also be used for customizing the entries in an eagerly loaded
+ collection; queries will normally want to use the
+ :meth:`_query.Query.populate_existing` method assuming the primary
+ collection of parent objects may already have been loaded::
+
+ sess.query(User).\
+ join(User.addresses).\
+ filter(Address.email_address.like('%@aol.com')).\
+ options(contains_eager(User.addresses)).\
+ populate_existing()
+
+ See the section :ref:`contains_eager` for complete usage details.
+
+ .. seealso::
+
+ :ref:`loading_toplevel`
+
+ :ref:`contains_eager`
+
+ """
+ if alias is not None:
+ if not isinstance(alias, str):
+ info = inspect(alias)
+ alias = info.selectable
+
+ else:
+ util.warn_deprecated(
+ "Passing a string name for the 'alias' argument to "
+ "'contains_eager()` is deprecated, and will not work in a "
+ "future release. Please use a sqlalchemy.alias() or "
+ "sqlalchemy.orm.aliased() construct.",
+ version="1.4",
+ )
+
+ elif getattr(attr, "_of_type", None):
+ ot = inspect(attr._of_type)
+ alias = ot.selectable
+
+ cloned = loadopt.set_relationship_strategy(
+ attr, {"lazy": "joined"}, propagate_to_loaders=False
+ )
+ cloned.local_opts["eager_from_alias"] = alias
+ return cloned
+
+
+@contains_eager._add_unbound_fn
+def contains_eager(*keys, **kw):
+ return _UnboundLoad()._from_keys(
+ _UnboundLoad.contains_eager, keys, True, kw
+ )
+
+
+@loader_option()
+def load_only(loadopt, *attrs):
+ """Indicate that for a particular entity, only the given list
+ of column-based attribute names should be loaded; all others will be
+ deferred.
+
+ This function is part of the :class:`_orm.Load` interface and supports
+ both method-chained and standalone operation.
+
+ Example - given a class ``User``, load only the ``name`` and ``fullname``
+ attributes::
+
+ session.query(User).options(load_only(User.name, User.fullname))
+
+ Example - given a relationship ``User.addresses -> Address``, specify
+ subquery loading for the ``User.addresses`` collection, but on each
+ ``Address`` object load only the ``email_address`` attribute::
+
+ session.query(User).options(
+ subqueryload(User.addresses).load_only(Address.email_address)
+ )
+
+ For a :class:`_query.Query` that has multiple entities,
+ the lead entity can be
+ specifically referred to using the :class:`_orm.Load` constructor::
+
+ session.query(User, Address).join(User.addresses).options(
+ Load(User).load_only(User.name, User.fullname),
+ Load(Address).load_only(Address.email_address)
+ )
+
+ .. note:: This method will still load a :class:`_schema.Column` even
+ if the column property is defined with ``deferred=True``
+ for the :func:`.column_property` function.
+
+ .. versionadded:: 0.9.0
+
+ """
+ cloned = loadopt.set_column_strategy(
+ attrs, {"deferred": False, "instrument": True}
+ )
+ cloned.set_column_strategy(
+ "*", {"deferred": True, "instrument": True}, {"undefer_pks": True}
+ )
+ return cloned
+
+
+@load_only._add_unbound_fn
+def load_only(*attrs):
+ return _UnboundLoad().load_only(*attrs)
+
+
+@loader_option()
+def joinedload(loadopt, attr, innerjoin=None):
+ """Indicate that the given attribute should be loaded using joined
+ eager loading.
+
+ This function is part of the :class:`_orm.Load` interface and supports
+ both method-chained and standalone operation.
+
+ examples::
+
+ # joined-load the "orders" collection on "User"
+ query(User).options(joinedload(User.orders))
+
+ # joined-load Order.items and then Item.keywords
+ query(Order).options(
+ joinedload(Order.items).joinedload(Item.keywords))
+
+ # lazily load Order.items, but when Items are loaded,
+ # joined-load the keywords collection
+ query(Order).options(
+ lazyload(Order.items).joinedload(Item.keywords))
+
+ :param innerjoin: if ``True``, indicates that the joined eager load should
+ use an inner join instead of the default of left outer join::
+
+ query(Order).options(joinedload(Order.user, innerjoin=True))
+
+ In order to chain multiple eager joins together where some may be
+ OUTER and others INNER, right-nested joins are used to link them::
+
+ query(A).options(
+ joinedload(A.bs, innerjoin=False).
+ joinedload(B.cs, innerjoin=True)
+ )
+
+ The above query, linking A.bs via "outer" join and B.cs via "inner" join
+ would render the joins as "a LEFT OUTER JOIN (b JOIN c)". When using
+ older versions of SQLite (< 3.7.16), this form of JOIN is translated to
+ use full subqueries as this syntax is otherwise not directly supported.
+
+ The ``innerjoin`` flag can also be stated with the term ``"unnested"``.
+ This indicates that an INNER JOIN should be used, *unless* the join
+ is linked to a LEFT OUTER JOIN to the left, in which case it
+ will render as LEFT OUTER JOIN. For example, supposing ``A.bs``
+ is an outerjoin::
+
+ query(A).options(
+ joinedload(A.bs).
+ joinedload(B.cs, innerjoin="unnested")
+ )
+
+ The above join will render as "a LEFT OUTER JOIN b LEFT OUTER JOIN c",
+ rather than as "a LEFT OUTER JOIN (b JOIN c)".
+
+ .. note:: The "unnested" flag does **not** affect the JOIN rendered
+ from a many-to-many association table, e.g. a table configured
+ as :paramref:`_orm.relationship.secondary`, to the target table; for
+ correctness of results, these joins are always INNER and are
+ therefore right-nested if linked to an OUTER join.
+
+ .. versionchanged:: 1.0.0 ``innerjoin=True`` now implies
+ ``innerjoin="nested"``, whereas in 0.9 it implied
+ ``innerjoin="unnested"``. In order to achieve the pre-1.0 "unnested"
+ inner join behavior, use the value ``innerjoin="unnested"``.
+ See :ref:`migration_3008`.
+
+ .. note::
+
+ The joins produced by :func:`_orm.joinedload` are **anonymously
+ aliased**. The criteria by which the join proceeds cannot be
+ modified, nor can the :class:`_query.Query`
+ refer to these joins in any way,
+ including ordering. See :ref:`zen_of_eager_loading` for further
+ detail.
+
+ To produce a specific SQL JOIN which is explicitly available, use
+ :meth:`_query.Query.join`.
+ To combine explicit JOINs with eager loading
+ of collections, use :func:`_orm.contains_eager`; see
+ :ref:`contains_eager`.
+
+ .. seealso::
+
+ :ref:`loading_toplevel`
+
+ :ref:`joined_eager_loading`
+
+ """
+ loader = loadopt.set_relationship_strategy(attr, {"lazy": "joined"})
+ if innerjoin is not None:
+ loader.local_opts["innerjoin"] = innerjoin
+ return loader
+
+
+@joinedload._add_unbound_fn
+def joinedload(*keys, **kw):
+ return _UnboundLoad._from_keys(_UnboundLoad.joinedload, keys, False, kw)
+
+
+@loader_option()
+def subqueryload(loadopt, attr):
+ """Indicate that the given attribute should be loaded using
+ subquery eager loading.
+
+ This function is part of the :class:`_orm.Load` interface and supports
+ both method-chained and standalone operation.
+
+ examples::
+
+ # subquery-load the "orders" collection on "User"
+ query(User).options(subqueryload(User.orders))
+
+ # subquery-load Order.items and then Item.keywords
+ query(Order).options(
+ subqueryload(Order.items).subqueryload(Item.keywords))
+
+ # lazily load Order.items, but when Items are loaded,
+ # subquery-load the keywords collection
+ query(Order).options(
+ lazyload(Order.items).subqueryload(Item.keywords))
+
+
+ .. seealso::
+
+ :ref:`loading_toplevel`
+
+ :ref:`subquery_eager_loading`
+
+ """
+ return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"})
+
+
+@subqueryload._add_unbound_fn
+def subqueryload(*keys):
+ return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, False, {})
+
+
+@loader_option()
+def selectinload(loadopt, attr):
+ """Indicate that the given attribute should be loaded using
+ SELECT IN eager loading.
+
+ This function is part of the :class:`_orm.Load` interface and supports
+ both method-chained and standalone operation.
+
+ examples::
+
+ # selectin-load the "orders" collection on "User"
+ query(User).options(selectinload(User.orders))
+
+ # selectin-load Order.items and then Item.keywords
+ query(Order).options(
+ selectinload(Order.items).selectinload(Item.keywords))
+
+ # lazily load Order.items, but when Items are loaded,
+ # selectin-load the keywords collection
+ query(Order).options(
+ lazyload(Order.items).selectinload(Item.keywords))
+
+ .. versionadded:: 1.2
+
+ .. seealso::
+
+ :ref:`loading_toplevel`
+
+ :ref:`selectin_eager_loading`
+
+ """
+ return loadopt.set_relationship_strategy(attr, {"lazy": "selectin"})
+
+
+@selectinload._add_unbound_fn
+def selectinload(*keys):
+ return _UnboundLoad._from_keys(_UnboundLoad.selectinload, keys, False, {})
+
+
+@loader_option()
+def lazyload(loadopt, attr):
+ """Indicate that the given attribute should be loaded using "lazy"
+ loading.
+
+ This function is part of the :class:`_orm.Load` interface and supports
+ both method-chained and standalone operation.
+
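+    E.g., to ensure lazy loading for a hypothetical ``User.orders``
+    relationship, even if the mapping configures it as eager::
+
+        session.query(User).options(lazyload(User.orders))
+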
+ .. seealso::
+
+ :ref:`loading_toplevel`
+
+ :ref:`lazy_loading`
+
+ """
+ return loadopt.set_relationship_strategy(attr, {"lazy": "select"})
+
+
+@lazyload._add_unbound_fn
+def lazyload(*keys):
+ return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, False, {})
+
+
+@loader_option()
+def immediateload(loadopt, attr):
+ """Indicate that the given attribute should be loaded using
+ an immediate load with a per-attribute SELECT statement.
+
+ The load is achieved using the "lazyloader" strategy and does not
+ fire off any additional eager loaders.
+
+ The :func:`.immediateload` option is superseded in general
+ by the :func:`.selectinload` option, which performs the same task
+    more efficiently by emitting a single SELECT for all loaded objects.
+
+ This function is part of the :class:`_orm.Load` interface and supports
+ both method-chained and standalone operation.
+
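+    E.g., assuming a hypothetical ``User.orders`` relationship::
+
+        # each User, as it is loaded, emits an additional SELECT
+        # for its .orders collection
+        session.query(User).options(immediateload(User.orders))
+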
+ .. seealso::
+
+ :ref:`loading_toplevel`
+
+ :ref:`selectin_eager_loading`
+
+ """
+ loader = loadopt.set_relationship_strategy(attr, {"lazy": "immediate"})
+ return loader
+
+
+@immediateload._add_unbound_fn
+def immediateload(*keys):
+ return _UnboundLoad._from_keys(_UnboundLoad.immediateload, keys, False, {})
+
+
+@loader_option()
+def noload(loadopt, attr):
+ """Indicate that the given relationship attribute should remain unloaded.
+
+    The relationship attribute will return ``None`` when accessed, without
+    producing any loading effect.
+
+ This function is part of the :class:`_orm.Load` interface and supports
+ both method-chained and standalone operation.
+
+ :func:`_orm.noload` applies to :func:`_orm.relationship` attributes; for
+ column-based attributes, see :func:`_orm.defer`.
+
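+    E.g., assuming a hypothetical ``User.orders`` relationship::
+
+        # accessing User.orders emits no SQL and loads nothing
+        session.query(User).options(noload(User.orders))
+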
+ .. note:: Setting this loading strategy as the default strategy
+ for a relationship using the :paramref:`.orm.relationship.lazy`
+        parameter may cause issues with flushes, such as when a delete
+        operation needs to load related objects but ``None`` is returned
+        instead.
+
+ .. seealso::
+
+ :ref:`loading_toplevel`
+
+ """
+
+ return loadopt.set_relationship_strategy(attr, {"lazy": "noload"})
+
+
+@noload._add_unbound_fn
+def noload(*keys):
+ return _UnboundLoad._from_keys(_UnboundLoad.noload, keys, False, {})
+
+
+@loader_option()
+def raiseload(loadopt, attr, sql_only=False):
+ """Indicate that the given attribute should raise an error if accessed.
+
+ A relationship attribute configured with :func:`_orm.raiseload` will
+ raise an :exc:`~sqlalchemy.exc.InvalidRequestError` upon access. The
+ typical way this is useful is when an application is attempting to ensure
+ that all relationship attributes that are accessed in a particular context
+ would have been already loaded via eager loading. Instead of having
+ to read through SQL logs to ensure lazy loads aren't occurring, this
+ strategy will cause them to raise immediately.
+
+ :func:`_orm.raiseload` applies to :func:`_orm.relationship`
+ attributes only.
+ In order to apply raise-on-SQL behavior to a column-based attribute,
+ use the :paramref:`.orm.defer.raiseload` parameter on the :func:`.defer`
+ loader option.
+
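+    E.g., assuming a hypothetical ``User.addresses`` relationship::
+
+        # any lazy load of User.addresses will now raise
+        session.query(User).options(raiseload(User.addresses))
+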
+ :param sql_only: if True, raise only if the lazy load would emit SQL, but
+ not if it is only checking the identity map, or determining that the
+ related value should just be None due to missing keys. When False, the
+ strategy will raise for all varieties of relationship loading.
+
+ This function is part of the :class:`_orm.Load` interface and supports
+ both method-chained and standalone operation.
+
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`loading_toplevel`
+
+ :ref:`prevent_lazy_with_raiseload`
+
+ :ref:`deferred_raiseload`
+
+ """
+
+ return loadopt.set_relationship_strategy(
+ attr, {"lazy": "raise_on_sql" if sql_only else "raise"}
+ )
+
+
+@raiseload._add_unbound_fn
+def raiseload(*keys, **kw):
+ return _UnboundLoad._from_keys(_UnboundLoad.raiseload, keys, False, kw)
+
+
+@loader_option()
+def defaultload(loadopt, attr):
+ """Indicate an attribute should load using its default loader style.
+
+ This method is used to link to other loader options further into
+ a chain of attributes without altering the loader style of the links
+ along the chain. For example, to set joined eager loading for an
+ element of an element::
+
+ session.query(MyClass).options(
+ defaultload(MyClass.someattribute).
+ joinedload(MyOtherClass.someotherattribute)
+ )
+
+ :func:`.defaultload` is also useful for setting column-level options
+ on a related class, namely that of :func:`.defer` and :func:`.undefer`::
+
+ session.query(MyClass).options(
+ defaultload(MyClass.someattribute).
+ defer("some_column").
+ undefer("some_other_column")
+ )
+
+ .. seealso::
+
+ :meth:`_orm.Load.options` - allows for complex hierarchical
+ loader option structures with less verbosity than with individual
+ :func:`.defaultload` directives.
+
+ :ref:`relationship_loader_options`
+
+ :ref:`deferred_loading_w_multiple`
+
+ """
+ return loadopt.set_relationship_strategy(attr, None)
+
+
+@defaultload._add_unbound_fn
+def defaultload(*keys):
+ return _UnboundLoad._from_keys(_UnboundLoad.defaultload, keys, False, {})
+
+
+@loader_option()
+def defer(loadopt, key, raiseload=False):
+ r"""Indicate that the given column-oriented attribute should be deferred,
+ e.g. not loaded until accessed.
+
+ This function is part of the :class:`_orm.Load` interface and supports
+ both method-chained and standalone operation.
+
+ e.g.::
+
+ from sqlalchemy.orm import defer
+
+ session.query(MyClass).options(
+ defer("attribute_one"),
+ defer("attribute_two"))
+
+ session.query(MyClass).options(
+ defer(MyClass.attribute_one),
+ defer(MyClass.attribute_two))
+
+ To specify a deferred load of an attribute on a related class,
+ the path can be specified one token at a time, specifying the loading
+ style for each link along the chain. To leave the loading style
+ for a link unchanged, use :func:`_orm.defaultload`::
+
+ session.query(MyClass).options(defaultload("someattr").defer("some_column"))
+
+ A :class:`_orm.Load` object that is present on a certain path can have
+ :meth:`_orm.Load.defer` called multiple times,
+ each will operate on the same
+ parent entity::
+
+
+ session.query(MyClass).options(
+ defaultload("someattr").
+ defer("some_column").
+ defer("some_other_column").
+ defer("another_column")
+ )
+
+ :param key: Attribute to be deferred.
+
+    :param raiseload: raise :class:`.InvalidRequestError` if loading the
+     column value would require emitting SQL. Used to prevent unwanted
+     SQL from being emitted.
+
+ .. versionadded:: 1.4
+
+ .. seealso::
+
+ :ref:`deferred_raiseload`
+
+ :param \*addl_attrs: This option supports the old 0.8 style
+ of specifying a path as a series of attributes, which is now superseded
+ by the method-chained style.
+
+ .. deprecated:: 0.9 The \*addl_attrs on :func:`_orm.defer` is
+ deprecated and will be removed in a future release. Please
+ use method chaining in conjunction with defaultload() to
+ indicate a path.
+
+
+ .. seealso::
+
+ :ref:`deferred`
+
+ :func:`_orm.undefer`
+
+ """
+ strategy = {"deferred": True, "instrument": True}
+ if raiseload:
+ strategy["raiseload"] = True
+ return loadopt.set_column_strategy((key,), strategy)
+
+
+@defer._add_unbound_fn
+def defer(key, *addl_attrs, **kw):
+ if addl_attrs:
+ util.warn_deprecated(
+ "The *addl_attrs on orm.defer is deprecated. Please use "
+ "method chaining in conjunction with defaultload() to "
+ "indicate a path.",
+ version="1.3",
+ )
+ return _UnboundLoad._from_keys(
+ _UnboundLoad.defer, (key,) + addl_attrs, False, kw
+ )
+
+
+@loader_option()
+def undefer(loadopt, key):
+ r"""Indicate that the given column-oriented attribute should be undeferred,
+ e.g. specified within the SELECT statement of the entity as a whole.
+
+ The column being undeferred is typically set up on the mapping as a
+ :func:`.deferred` attribute.
+
+ This function is part of the :class:`_orm.Load` interface and supports
+ both method-chained and standalone operation.
+
+ Examples::
+
+ # undefer two columns
+ session.query(MyClass).options(undefer("col1"), undefer("col2"))
+
+ # undefer all columns specific to a single class using Load + *
+ session.query(MyClass, MyOtherClass).options(
+ Load(MyClass).undefer("*"))
+
+ # undefer a column on a related object
+ session.query(MyClass).options(
+ defaultload(MyClass.items).undefer('text'))
+
+ :param key: Attribute to be undeferred.
+
+ :param \*addl_attrs: This option supports the old 0.8 style
+ of specifying a path as a series of attributes, which is now superseded
+ by the method-chained style.
+
+ .. deprecated:: 0.9 The \*addl_attrs on :func:`_orm.undefer` is
+ deprecated and will be removed in a future release. Please
+ use method chaining in conjunction with defaultload() to
+ indicate a path.
+
+ .. seealso::
+
+ :ref:`deferred`
+
+ :func:`_orm.defer`
+
+ :func:`_orm.undefer_group`
+
+ """
+ return loadopt.set_column_strategy(
+ (key,), {"deferred": False, "instrument": True}
+ )
+
+
+@undefer._add_unbound_fn
+def undefer(key, *addl_attrs):
+ if addl_attrs:
+ util.warn_deprecated(
+ "The *addl_attrs on orm.undefer is deprecated. Please use "
+ "method chaining in conjunction with defaultload() to "
+ "indicate a path.",
+ version="1.3",
+ )
+ return _UnboundLoad._from_keys(
+ _UnboundLoad.undefer, (key,) + addl_attrs, False, {}
+ )
+
+
+@loader_option()
+def undefer_group(loadopt, name):
+ """Indicate that columns within the given deferred group name should be
+ undeferred.
+
+ The columns being undeferred are set up on the mapping as
+ :func:`.deferred` attributes and include a "group" name.
+
+    E.g.::
+
+ session.query(MyClass).options(undefer_group("large_attrs"))
+
+ To undefer a group of attributes on a related entity, the path can be
+ spelled out using relationship loader options, such as
+ :func:`_orm.defaultload`::
+
+ session.query(MyClass).options(
+ defaultload("someattr").undefer_group("large_attrs"))
+
+ .. versionchanged:: 0.9.0 :func:`_orm.undefer_group` is now specific to a
+ particular entity load path.
+
+ .. seealso::
+
+ :ref:`deferred`
+
+ :func:`_orm.defer`
+
+ :func:`_orm.undefer`
+
+ """
+ return loadopt.set_column_strategy(
+ "*", None, {"undefer_group_%s" % name: True}, opts_only=True
+ )
+
+
+@undefer_group._add_unbound_fn
+def undefer_group(name):
+ return _UnboundLoad().undefer_group(name)
+
+
+@loader_option()
+def with_expression(loadopt, key, expression):
+ r"""Apply an ad-hoc SQL expression to a "deferred expression" attribute.
+
+ This option is used in conjunction with the :func:`_orm.query_expression`
+ mapper-level construct that indicates an attribute which should be the
+ target of an ad-hoc SQL expression.
+
+ E.g.::
+
+
+ sess.query(SomeClass).options(
+ with_expression(SomeClass.x_y_expr, SomeClass.x + SomeClass.y)
+ )
+
+ .. versionadded:: 1.2
+
+    :param key: Attribute to be populated.
+
+    :param expression: SQL expression to be applied to the attribute.
+
+ .. note:: the target attribute is populated only if the target object
+ is **not currently loaded** in the current :class:`_orm.Session`
+ unless the :meth:`_query.Query.populate_existing` method is used.
+ Please refer to :ref:`mapper_querytime_expression` for complete
+ usage details.
+
+ .. seealso::
+
+ :ref:`mapper_querytime_expression`
+
+ """
+
+ expression = coercions.expect(
+ roles.LabeledColumnExprRole, _orm_full_deannotate(expression)
+ )
+
+ return loadopt.set_column_strategy(
+ (key,), {"query_expression": True}, opts={"expression": expression}
+ )
+
+
+@with_expression._add_unbound_fn
+def with_expression(key, expression):
+ return _UnboundLoad._from_keys(
+ _UnboundLoad.with_expression, (key,), False, {"expression": expression}
+ )
+
+
+@loader_option()
+def selectin_polymorphic(loadopt, classes):
+ """Indicate an eager load should take place for all attributes
+ specific to a subclass.
+
+ This uses an additional SELECT with IN against all matched primary
+ key values, and is the per-query analogue to the ``"selectin"``
+ setting on the :paramref:`.mapper.polymorphic_load` parameter.
+
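+    E.g., given a hypothetical ``Employee`` base class with ``Manager``
+    and ``Engineer`` subclasses::
+
+        session.query(Employee).options(
+            selectin_polymorphic(Employee, [Manager, Engineer])
+        )
+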
+ .. versionadded:: 1.2
+
+ .. seealso::
+
+ :ref:`polymorphic_selectin`
+
+ """
+ loadopt.set_class_strategy(
+ {"selectinload_polymorphic": True},
+ opts={
+ "entities": tuple(
+ sorted((inspect(cls) for cls in classes), key=id)
+ )
+ },
+ )
+ return loadopt
+
+
+@selectin_polymorphic._add_unbound_fn
+def selectin_polymorphic(base_cls, classes):
+ ul = _UnboundLoad()
+ ul.is_class_strategy = True
+ ul.path = (inspect(base_cls),)
+ ul.selectin_polymorphic(classes)
+ return ul
diff --git a/lib/sqlalchemy/orm/sync.py b/lib/sqlalchemy/orm/sync.py
new file mode 100644
index 0000000..c041804
--- /dev/null
+++ b/lib/sqlalchemy/orm/sync.py
@@ -0,0 +1,167 @@
+# orm/sync.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""private module containing functions used for copying data
+between instances based on join conditions.
+
+"""
+
+from . import attributes
+from . import exc
+from . import util as orm_util
+from .. import util
+
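+# A note on "synchronize_pairs", used throughout this module: each pair is
+# (source_column, dest_column) derived from a join condition; for a typical
+# one-to-many relationship the pairs resemble
+# [(parent.c.id, child.c.parent_id)], and populate() copies the value of
+# the attribute mapped by the source column onto the attribute mapped by
+# the destination column.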
+
+def populate(
+ source,
+ source_mapper,
+ dest,
+ dest_mapper,
+ synchronize_pairs,
+ uowcommit,
+ flag_cascaded_pks,
+):
+ source_dict = source.dict
+ dest_dict = dest.dict
+
+ for l, r in synchronize_pairs:
+ try:
+ # inline of source_mapper._get_state_attr_by_column
+ prop = source_mapper._columntoproperty[l]
+ value = source.manager[prop.key].impl.get(
+ source, source_dict, attributes.PASSIVE_OFF
+ )
+ except exc.UnmappedColumnError as err:
+ _raise_col_to_prop(False, source_mapper, l, dest_mapper, r, err)
+
+ try:
+ # inline of dest_mapper._set_state_attr_by_column
+ prop = dest_mapper._columntoproperty[r]
+ dest.manager[prop.key].impl.set(dest, dest_dict, value, None)
+ except exc.UnmappedColumnError as err:
+ _raise_col_to_prop(True, source_mapper, l, dest_mapper, r, err)
+
+ # technically the "r.primary_key" check isn't
+ # needed here, but we check for this condition to limit
+ # how often this logic is invoked for memory/performance
+ # reasons, since we only need this info for a primary key
+ # destination.
+ if (
+ flag_cascaded_pks
+ and l.primary_key
+ and r.primary_key
+ and r.references(l)
+ ):
+ uowcommit.attributes[("pk_cascaded", dest, r)] = True
+
+
+def bulk_populate_inherit_keys(source_dict, source_mapper, synchronize_pairs):
+ # a simplified version of populate() used by bulk insert mode
+ for l, r in synchronize_pairs:
+ try:
+ prop = source_mapper._columntoproperty[l]
+ value = source_dict[prop.key]
+ except exc.UnmappedColumnError as err:
+ _raise_col_to_prop(False, source_mapper, l, source_mapper, r, err)
+
+ try:
+ prop = source_mapper._columntoproperty[r]
+ source_dict[prop.key] = value
+        except exc.UnmappedColumnError as err:
+            _raise_col_to_prop(True, source_mapper, l, source_mapper, r, err)
+
+
+def clear(dest, dest_mapper, synchronize_pairs):
+ for l, r in synchronize_pairs:
+ if (
+ r.primary_key
+ and dest_mapper._get_state_attr_by_column(dest, dest.dict, r)
+ not in orm_util._none_set
+ ):
+
+ raise AssertionError(
+ "Dependency rule tried to blank-out primary key "
+ "column '%s' on instance '%s'" % (r, orm_util.state_str(dest))
+ )
+ try:
+ dest_mapper._set_state_attr_by_column(dest, dest.dict, r, None)
+ except exc.UnmappedColumnError as err:
+ _raise_col_to_prop(True, None, l, dest_mapper, r, err)
+
+
+def update(source, source_mapper, dest, old_prefix, synchronize_pairs):
+ for l, r in synchronize_pairs:
+ try:
+ oldvalue = source_mapper._get_committed_attr_by_column(
+ source.obj(), l
+ )
+ value = source_mapper._get_state_attr_by_column(
+ source, source.dict, l, passive=attributes.PASSIVE_OFF
+ )
+ except exc.UnmappedColumnError as err:
+ _raise_col_to_prop(False, source_mapper, l, None, r, err)
+ dest[r.key] = value
+ dest[old_prefix + r.key] = oldvalue
+
+
+def populate_dict(source, source_mapper, dict_, synchronize_pairs):
+ for l, r in synchronize_pairs:
+ try:
+ value = source_mapper._get_state_attr_by_column(
+ source, source.dict, l, passive=attributes.PASSIVE_OFF
+ )
+ except exc.UnmappedColumnError as err:
+ _raise_col_to_prop(False, source_mapper, l, None, r, err)
+
+ dict_[r.key] = value
+
+
+def source_modified(uowcommit, source, source_mapper, synchronize_pairs):
+ """return true if the source object has changes from an old to a
+ new value on the given synchronize pairs
+
+ """
+ for l, r in synchronize_pairs:
+ try:
+ prop = source_mapper._columntoproperty[l]
+ except exc.UnmappedColumnError as err:
+ _raise_col_to_prop(False, source_mapper, l, None, r, err)
+ history = uowcommit.get_attribute_history(
+ source, prop.key, attributes.PASSIVE_NO_INITIALIZE
+ )
+ if bool(history.deleted):
+ return True
+    return False
+
+
+def _raise_col_to_prop(
+ isdest, source_mapper, source_column, dest_mapper, dest_column, err
+):
+ if isdest:
+ util.raise_(
+ exc.UnmappedColumnError(
+ "Can't execute sync rule for "
+ "destination column '%s'; mapper '%s' does not map "
+ "this column. Try using an explicit `foreign_keys` "
+ "collection which does not include this column (or use "
+ "a viewonly=True relation)." % (dest_column, dest_mapper)
+ ),
+ replace_context=err,
+ )
+ else:
+ util.raise_(
+ exc.UnmappedColumnError(
+ "Can't execute sync rule for "
+ "source column '%s'; mapper '%s' does not map this "
+ "column. Try using an explicit `foreign_keys` "
+ "collection which does not include destination column "
+ "'%s' (or use a viewonly=True relation)."
+ % (source_column, source_mapper, dest_column)
+ ),
+ replace_context=err,
+ )
diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py
new file mode 100644
index 0000000..2257637
--- /dev/null
+++ b/lib/sqlalchemy/orm/unitofwork.py
@@ -0,0 +1,784 @@
+# orm/unitofwork.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+"""The internals for the unit of work system.
+
+The session's flush() process passes objects to a contextual object
+here, which assembles flush tasks based on mappers and their properties,
+organizes them in order of dependency, and executes.
+
+"""
+
+from . import attributes
+from . import exc as orm_exc
+from . import util as orm_util
+from .. import event
+from .. import util
+from ..util import topological
+
+
+def _warn_for_cascade_backrefs(state, prop):
+ util.warn_deprecated_20(
+ '"%s" object is being merged into a Session along the backref '
+ 'cascade path for relationship "%s"; in SQLAlchemy 2.0, this '
+ "reverse cascade will not take place. Set cascade_backrefs to "
+ "False in either the relationship() or backref() function for "
+ "the 2.0 behavior; or to set globally for the whole "
+ "Session, set the future=True flag" % (state.class_.__name__, prop),
+ code="s9r1",
+ )
+
+
+def track_cascade_events(descriptor, prop):
+ """Establish event listeners on object attributes which handle
+ cascade-on-set/append.
+
+ """
+ key = prop.key
+
+ def append(state, item, initiator):
+ # process "save_update" cascade rules for when
+ # an instance is appended to the list of another instance
+
+ if item is None:
+ return
+
+ sess = state.session
+ if sess:
+ if sess._warn_on_events:
+ sess._flush_warning("collection append")
+
+ prop = state.manager.mapper._props[key]
+ item_state = attributes.instance_state(item)
+
+ if (
+ prop._cascade.save_update
+ and (
+ (prop.cascade_backrefs and not sess.future)
+ or key == initiator.key
+ )
+ and not sess._contains_state(item_state)
+ ):
+ if key != initiator.key:
+ _warn_for_cascade_backrefs(item_state, prop)
+ sess._save_or_update_state(item_state)
+ return item
+
+ def remove(state, item, initiator):
+ if item is None:
+ return
+
+ sess = state.session
+
+ prop = state.manager.mapper._props[key]
+
+ if sess and sess._warn_on_events:
+ sess._flush_warning(
+ "collection remove"
+ if prop.uselist
+ else "related attribute delete"
+ )
+
+ if (
+ item is not None
+ and item is not attributes.NEVER_SET
+ and item is not attributes.PASSIVE_NO_RESULT
+ and prop._cascade.delete_orphan
+ ):
+ # expunge pending orphans
+ item_state = attributes.instance_state(item)
+
+ if prop.mapper._is_orphan(item_state):
+ if sess and item_state in sess._new:
+ sess.expunge(item)
+ else:
+ # the related item may or may not itself be in a
+ # Session, however the parent for which we are catching
+ # the event is not in a session, so memoize this on the
+ # item
+ item_state._orphaned_outside_of_session = True
+
+ def set_(state, newvalue, oldvalue, initiator):
+ # process "save_update" cascade rules for when an instance
+ # is attached to another instance
+ if oldvalue is newvalue:
+ return newvalue
+
+ sess = state.session
+ if sess:
+
+ if sess._warn_on_events:
+ sess._flush_warning("related attribute set")
+
+ prop = state.manager.mapper._props[key]
+ if newvalue is not None:
+ newvalue_state = attributes.instance_state(newvalue)
+ if (
+ prop._cascade.save_update
+ and (
+ (prop.cascade_backrefs and not sess.future)
+ or key == initiator.key
+ )
+ and not sess._contains_state(newvalue_state)
+ ):
+ if key != initiator.key:
+ _warn_for_cascade_backrefs(newvalue_state, prop)
+ sess._save_or_update_state(newvalue_state)
+
+ if (
+ oldvalue is not None
+ and oldvalue is not attributes.NEVER_SET
+ and oldvalue is not attributes.PASSIVE_NO_RESULT
+ and prop._cascade.delete_orphan
+ ):
+ # possible to reach here with attributes.NEVER_SET ?
+ oldvalue_state = attributes.instance_state(oldvalue)
+
+ if oldvalue_state in sess._new and prop.mapper._is_orphan(
+ oldvalue_state
+ ):
+ sess.expunge(oldvalue)
+ return newvalue
+
+ event.listen(descriptor, "append_wo_mutation", append, raw=True)
+ event.listen(descriptor, "append", append, raw=True, retval=True)
+ event.listen(descriptor, "remove", remove, raw=True, retval=True)
+ event.listen(descriptor, "set", set_, raw=True, retval=True)
+
+
+class UOWTransaction(object):
+ def __init__(self, session):
+ self.session = session
+
+ # dictionary used by external actors to
+ # store arbitrary state information.
+ self.attributes = {}
+
+ # dictionary of mappers to sets of
+ # DependencyProcessors, which are also
+ # set to be part of the sorted flush actions,
+ # which have that mapper as a parent.
+ self.deps = util.defaultdict(set)
+
+ # dictionary of mappers to sets of InstanceState
+ # items pending for flush which have that mapper
+ # as a parent.
+ self.mappers = util.defaultdict(set)
+
+ # a dictionary of Preprocess objects, which gather
+ # additional states impacted by the flush
+ # and determine if a flush action is needed
+ self.presort_actions = {}
+
+ # dictionary of PostSortRec objects, each
+ # one issues work during the flush within
+ # a certain ordering.
+ self.postsort_actions = {}
+
+ # a set of 2-tuples, each containing two
+ # PostSortRec objects where the second
+ # is dependent on the first being executed
+ # first
+ self.dependencies = set()
+
+ # dictionary of InstanceState-> (isdelete, listonly)
+ # tuples, indicating if this state is to be deleted
+ # or insert/updated, or just refreshed
+ self.states = {}
+
+ # tracks InstanceStates which will be receiving
+ # a "post update" call. Keys are mappers,
+ # values are a set of states and a set of the
+ # columns which should be included in the update.
+ self.post_update_states = util.defaultdict(lambda: (set(), set()))
+
+ @property
+ def has_work(self):
+ return bool(self.states)
+
+ def was_already_deleted(self, state):
+ """Return ``True`` if the given state is expired and was deleted
+ previously.
+ """
+ if state.expired:
+ try:
+ state._load_expired(state, attributes.PASSIVE_OFF)
+ except orm_exc.ObjectDeletedError:
+ self.session._remove_newly_deleted([state])
+ return True
+ return False
+
+ def is_deleted(self, state):
+ """Return ``True`` if the given state is marked as deleted
+ within this uowtransaction."""
+
+ return state in self.states and self.states[state][0]
+
+ def memo(self, key, callable_):
+ if key in self.attributes:
+ return self.attributes[key]
+ else:
+ self.attributes[key] = ret = callable_()
+ return ret
+
+ def remove_state_actions(self, state):
+ """Remove pending actions for a state from the uowtransaction."""
+
+ isdelete = self.states[state][0]
+
+ self.states[state] = (isdelete, True)
+
+ def get_attribute_history(
+ self, state, key, passive=attributes.PASSIVE_NO_INITIALIZE
+ ):
+ """Facade to attributes.get_state_history(), including
+ caching of results."""
+
+ hashkey = ("history", state, key)
+
+ # cache the objects, not the states; the strong reference here
+ # prevents newly loaded objects from being dereferenced during the
+ # flush process
+
+ if hashkey in self.attributes:
+ history, state_history, cached_passive = self.attributes[hashkey]
+ # if the cached lookup was "passive" and now
+ # we want non-passive, do a non-passive lookup and re-cache
+
+ if (
+ not cached_passive & attributes.SQL_OK
+ and passive & attributes.SQL_OK
+ ):
+ impl = state.manager[key].impl
+ history = impl.get_history(
+ state,
+ state.dict,
+ attributes.PASSIVE_OFF
+ | attributes.LOAD_AGAINST_COMMITTED
+ | attributes.NO_RAISE,
+ )
+ if history and impl.uses_objects:
+ state_history = history.as_state()
+ else:
+ state_history = history
+ self.attributes[hashkey] = (history, state_history, passive)
+ else:
+ impl = state.manager[key].impl
+ # TODO: store the history as (state, object) tuples
+ # so we don't have to keep converting here
+ history = impl.get_history(
+ state,
+ state.dict,
+ passive
+ | attributes.LOAD_AGAINST_COMMITTED
+ | attributes.NO_RAISE,
+ )
+ if history and impl.uses_objects:
+ state_history = history.as_state()
+ else:
+ state_history = history
+ self.attributes[hashkey] = (history, state_history, passive)
+
+ return state_history
+
+ def has_dep(self, processor):
+ return (processor, True) in self.presort_actions
+
+ def register_preprocessor(self, processor, fromparent):
+ key = (processor, fromparent)
+ if key not in self.presort_actions:
+ self.presort_actions[key] = Preprocess(processor, fromparent)
+
+ def register_object(
+ self,
+ state,
+ isdelete=False,
+ listonly=False,
+ cancel_delete=False,
+ operation=None,
+ prop=None,
+ ):
+ if not self.session._contains_state(state):
+ # this condition is normal when objects are registered
+ # as part of a relationship cascade operation. it should
+ # not occur for the top-level register from Session.flush().
+ if not state.deleted and operation is not None:
+ util.warn(
+ "Object of type %s not in session, %s operation "
+ "along '%s' will not proceed"
+ % (orm_util.state_class_str(state), operation, prop)
+ )
+ return False
+
+ if state not in self.states:
+ mapper = state.manager.mapper
+
+ if mapper not in self.mappers:
+ self._per_mapper_flush_actions(mapper)
+
+ self.mappers[mapper].add(state)
+ self.states[state] = (isdelete, listonly)
+ else:
+ if not listonly and (isdelete or cancel_delete):
+ self.states[state] = (isdelete, False)
+ return True
+
+ def register_post_update(self, state, post_update_cols):
+ mapper = state.manager.mapper.base_mapper
+ states, cols = self.post_update_states[mapper]
+ states.add(state)
+ cols.update(post_update_cols)
+
+ def _per_mapper_flush_actions(self, mapper):
+ saves = SaveUpdateAll(self, mapper.base_mapper)
+ deletes = DeleteAll(self, mapper.base_mapper)
+ self.dependencies.add((saves, deletes))
+
+ for dep in mapper._dependency_processors:
+ dep.per_property_preprocessors(self)
+
+ for prop in mapper.relationships:
+ if prop.viewonly:
+ continue
+ dep = prop._dependency_processor
+ dep.per_property_preprocessors(self)
+
+ @util.memoized_property
+ def _mapper_for_dep(self):
+ """return a dynamic mapping of (Mapper, DependencyProcessor) to
+ True or False, indicating if the DependencyProcessor operates
+ on objects of that Mapper.
+
+ The result is stored in the dictionary persistently once
+ calculated.
+
+ """
+ return util.PopulateDict(
+ lambda tup: tup[0]._props.get(tup[1].key) is tup[1].prop
+ )
+
+ def filter_states_for_dep(self, dep, states):
+ """Filter the given list of InstanceStates to those relevant to the
+ given DependencyProcessor.
+
+ """
+ mapper_for_dep = self._mapper_for_dep
+ return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]]
+
+ def states_for_mapper_hierarchy(self, mapper, isdelete, listonly):
+ checktup = (isdelete, listonly)
+ for mapper in mapper.base_mapper.self_and_descendants:
+ for state in self.mappers[mapper]:
+ if self.states[state] == checktup:
+ yield state
+
+ def _generate_actions(self):
+ """Generate the full, unsorted collection of PostSortRecs as
+ well as dependency pairs for this UOWTransaction.
+
+ """
+        # execute presort_actions until all states
+        # have been processed; a presort_action might
+        # add new states to the uow.
+ while True:
+ ret = False
+ for action in list(self.presort_actions.values()):
+ if action.execute(self):
+ ret = True
+ if not ret:
+ break
+
+ # see if the graph of mapper dependencies has cycles.
+ self.cycles = cycles = topological.find_cycles(
+ self.dependencies, list(self.postsort_actions.values())
+ )
+
+ if cycles:
+ # if yes, break the per-mapper actions into
+ # per-state actions
+ convert = dict(
+ (rec, set(rec.per_state_flush_actions(self))) for rec in cycles
+ )
+
+ # rewrite the existing dependencies to point to
+ # the per-state actions for those per-mapper actions
+ # that were broken up.
+ for edge in list(self.dependencies):
+ if (
+ None in edge
+ or edge[0].disabled
+ or edge[1].disabled
+ or cycles.issuperset(edge)
+ ):
+ self.dependencies.remove(edge)
+ elif edge[0] in cycles:
+ self.dependencies.remove(edge)
+ for dep in convert[edge[0]]:
+ self.dependencies.add((dep, edge[1]))
+ elif edge[1] in cycles:
+ self.dependencies.remove(edge)
+ for dep in convert[edge[1]]:
+ self.dependencies.add((edge[0], dep))
+
+ return set(
+ [a for a in self.postsort_actions.values() if not a.disabled]
+ ).difference(cycles)
+
+ def execute(self):
+ postsort_actions = self._generate_actions()
+
+ postsort_actions = sorted(
+ postsort_actions,
+ key=lambda item: item.sort_key,
+ )
+ # sort = topological.sort(self.dependencies, postsort_actions)
+ # print "--------------"
+ # print "\ndependencies:", self.dependencies
+ # print "\ncycles:", self.cycles
+ # print "\nsort:", list(sort)
+ # print "\nCOUNT OF POSTSORT ACTIONS", len(postsort_actions)
+
+ # execute
+ if self.cycles:
+ for subset in topological.sort_as_subsets(
+ self.dependencies, postsort_actions
+ ):
+ set_ = set(subset)
+ while set_:
+ n = set_.pop()
+ n.execute_aggregate(self, set_)
+ else:
+ for rec in topological.sort(self.dependencies, postsort_actions):
+ rec.execute(self)
+
+ def finalize_flush_changes(self):
+ """Mark processed objects as clean / deleted after a successful
+ flush().
+
+ This method is called within the flush() method after the
+ execute() method has succeeded and the transaction has been committed.
+
+ """
+ if not self.states:
+ return
+
+ states = set(self.states)
+        isdel = {
+            s for (s, (isdelete, listonly)) in self.states.items() if isdelete
+        }
+ other = states.difference(isdel)
+ if isdel:
+ self.session._remove_newly_deleted(isdel)
+ if other:
+ self.session._register_persistent(other)
+
+
+class IterateMappersMixin(object):
+ def _mappers(self, uow):
+ if self.fromparent:
+ return iter(
+ m
+ for m in self.dependency_processor.parent.self_and_descendants
+ if uow._mapper_for_dep[(m, self.dependency_processor)]
+ )
+ else:
+ return self.dependency_processor.mapper.self_and_descendants
+
+
+class Preprocess(IterateMappersMixin):
+ __slots__ = (
+ "dependency_processor",
+ "fromparent",
+ "processed",
+ "setup_flush_actions",
+ )
+
+ def __init__(self, dependency_processor, fromparent):
+ self.dependency_processor = dependency_processor
+ self.fromparent = fromparent
+ self.processed = set()
+ self.setup_flush_actions = False
+
+ def execute(self, uow):
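+        # partition not-yet-processed states into deletes and saves, run
+        # the dependency processor's presort hooks on each batch, and set
+        # up per-property flush actions the first time changes are detected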
+ delete_states = set()
+ save_states = set()
+
+ for mapper in self._mappers(uow):
+ for state in uow.mappers[mapper].difference(self.processed):
+ (isdelete, listonly) = uow.states[state]
+ if not listonly:
+ if isdelete:
+ delete_states.add(state)
+ else:
+ save_states.add(state)
+
+ if delete_states:
+ self.dependency_processor.presort_deletes(uow, delete_states)
+ self.processed.update(delete_states)
+ if save_states:
+ self.dependency_processor.presort_saves(uow, save_states)
+ self.processed.update(save_states)
+
+ if delete_states or save_states:
+ if not self.setup_flush_actions and (
+ self.dependency_processor.prop_has_changes(
+ uow, delete_states, True
+ )
+ or self.dependency_processor.prop_has_changes(
+ uow, save_states, False
+ )
+ ):
+ self.dependency_processor.per_property_flush_actions(uow)
+ self.setup_flush_actions = True
+ return True
+ else:
+ return False
+
+
+class PostSortRec(object):
+ __slots__ = ("disabled",)
+
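+    # records are interned per-flush: constructing a PostSortRec subclass
+    # with the same (cls, *args) key returns the existing instance from
+    # uow.postsort_actions, so dependency pairs can be declared repeatedly
+    # without creating duplicate actions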
+ def __new__(cls, uow, *args):
+ key = (cls,) + args
+ if key in uow.postsort_actions:
+ return uow.postsort_actions[key]
+ else:
+ uow.postsort_actions[key] = ret = object.__new__(cls)
+ ret.disabled = False
+ return ret
+
+ def execute_aggregate(self, uow, recs):
+ self.execute(uow)
+
+
+class ProcessAll(IterateMappersMixin, PostSortRec):
+ __slots__ = "dependency_processor", "isdelete", "fromparent", "sort_key"
+
+ def __init__(self, uow, dependency_processor, isdelete, fromparent):
+ self.dependency_processor = dependency_processor
+ self.sort_key = (
+ "ProcessAll",
+ self.dependency_processor.sort_key,
+ isdelete,
+ )
+ self.isdelete = isdelete
+ self.fromparent = fromparent
+ uow.deps[dependency_processor.parent.base_mapper].add(
+ dependency_processor
+ )
+
+ def execute(self, uow):
+ states = self._elements(uow)
+ if self.isdelete:
+ self.dependency_processor.process_deletes(uow, states)
+ else:
+ self.dependency_processor.process_saves(uow, states)
+
+ def per_state_flush_actions(self, uow):
+ # this is handled by SaveUpdateAll and DeleteAll,
+ # since a ProcessAll should unconditionally be pulled
+ # into per-state if either the parent/child mappers
+ # are part of a cycle
+ return iter([])
+
+ def __repr__(self):
+ return "%s(%s, isdelete=%s)" % (
+ self.__class__.__name__,
+ self.dependency_processor,
+ self.isdelete,
+ )
+
+ def _elements(self, uow):
+ for mapper in self._mappers(uow):
+ for state in uow.mappers[mapper]:
+ (isdelete, listonly) = uow.states[state]
+ if isdelete == self.isdelete and not listonly:
+ yield state
+
+
+class PostUpdateAll(PostSortRec):
+ __slots__ = "mapper", "isdelete", "sort_key"
+
+ def __init__(self, uow, mapper, isdelete):
+ self.mapper = mapper
+ self.isdelete = isdelete
+ self.sort_key = ("PostUpdateAll", mapper._sort_key, isdelete)
+
+ @util.preload_module("sqlalchemy.orm.persistence")
+ def execute(self, uow):
+ persistence = util.preloaded.orm_persistence
+ states, cols = uow.post_update_states[self.mapper]
+ states = [s for s in states if uow.states[s][0] == self.isdelete]
+
+ persistence.post_update(self.mapper, states, uow, cols)
+
+
+class SaveUpdateAll(PostSortRec):
+ __slots__ = ("mapper", "sort_key")
+
+ def __init__(self, uow, mapper):
+ self.mapper = mapper
+ self.sort_key = ("SaveUpdateAll", mapper._sort_key)
+ assert mapper is mapper.base_mapper
+
+ @util.preload_module("sqlalchemy.orm.persistence")
+ def execute(self, uow):
+ util.preloaded.orm_persistence.save_obj(
+ self.mapper,
+ uow.states_for_mapper_hierarchy(self.mapper, False, False),
+ uow,
+ )
+
+ def per_state_flush_actions(self, uow):
+ states = list(
+ uow.states_for_mapper_hierarchy(self.mapper, False, False)
+ )
+ base_mapper = self.mapper.base_mapper
+ delete_all = DeleteAll(uow, base_mapper)
+ for state in states:
+ # keep saves before deletes -
+ # this ensures 'row switch' operations work
+ action = SaveUpdateState(uow, state)
+ uow.dependencies.add((action, delete_all))
+ yield action
+
+ for dep in uow.deps[self.mapper]:
+ states_for_prop = uow.filter_states_for_dep(dep, states)
+ dep.per_state_flush_actions(uow, states_for_prop, False)
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, self.mapper)
+
+
+class DeleteAll(PostSortRec):
+ __slots__ = ("mapper", "sort_key")
+
+ def __init__(self, uow, mapper):
+ self.mapper = mapper
+ self.sort_key = ("DeleteAll", mapper._sort_key)
+ assert mapper is mapper.base_mapper
+
+ @util.preload_module("sqlalchemy.orm.persistence")
+ def execute(self, uow):
+ util.preloaded.orm_persistence.delete_obj(
+ self.mapper,
+ uow.states_for_mapper_hierarchy(self.mapper, True, False),
+ uow,
+ )
+
+ def per_state_flush_actions(self, uow):
+ states = list(
+ uow.states_for_mapper_hierarchy(self.mapper, True, False)
+ )
+ base_mapper = self.mapper.base_mapper
+ save_all = SaveUpdateAll(uow, base_mapper)
+ for state in states:
+ # keep saves before deletes -
+ # this ensures 'row switch' operations work
+ action = DeleteState(uow, state)
+ uow.dependencies.add((save_all, action))
+ yield action
+
+ for dep in uow.deps[self.mapper]:
+ states_for_prop = uow.filter_states_for_dep(dep, states)
+ dep.per_state_flush_actions(uow, states_for_prop, True)
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, self.mapper)
+
+
+class ProcessState(PostSortRec):
+ __slots__ = "dependency_processor", "isdelete", "state", "sort_key"
+
+ def __init__(self, uow, dependency_processor, isdelete, state):
+ self.dependency_processor = dependency_processor
+ self.sort_key = ("ProcessState", dependency_processor.sort_key)
+ self.isdelete = isdelete
+ self.state = state
+
+ def execute_aggregate(self, uow, recs):
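+        # batch all outstanding records that share this record's class,
+        # dependency processor and isdelete flag into a single call,
+        # removing them from the outstanding set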
+ cls_ = self.__class__
+ dependency_processor = self.dependency_processor
+ isdelete = self.isdelete
+ our_recs = [
+ r
+ for r in recs
+ if r.__class__ is cls_
+ and r.dependency_processor is dependency_processor
+ and r.isdelete is isdelete
+ ]
+ recs.difference_update(our_recs)
+ states = [self.state] + [r.state for r in our_recs]
+ if isdelete:
+ dependency_processor.process_deletes(uow, states)
+ else:
+ dependency_processor.process_saves(uow, states)
+
+ def __repr__(self):
+ return "%s(%s, %s, delete=%s)" % (
+ self.__class__.__name__,
+ self.dependency_processor,
+ orm_util.state_str(self.state),
+ self.isdelete,
+ )
+
+
+class SaveUpdateState(PostSortRec):
+ __slots__ = "state", "mapper", "sort_key"
+
+ def __init__(self, uow, state):
+ self.state = state
+ self.mapper = state.mapper.base_mapper
+ self.sort_key = ("ProcessState", self.mapper._sort_key)
+
+ @util.preload_module("sqlalchemy.orm.persistence")
+ def execute_aggregate(self, uow, recs):
+ persistence = util.preloaded.orm_persistence
+ cls_ = self.__class__
+ mapper = self.mapper
+ our_recs = [
+ r for r in recs if r.__class__ is cls_ and r.mapper is mapper
+ ]
+ recs.difference_update(our_recs)
+ persistence.save_obj(
+ mapper, [self.state] + [r.state for r in our_recs], uow
+ )
+
+ def __repr__(self):
+ return "%s(%s)" % (
+ self.__class__.__name__,
+ orm_util.state_str(self.state),
+ )
+
+
+class DeleteState(PostSortRec):
+ __slots__ = "state", "mapper", "sort_key"
+
+ def __init__(self, uow, state):
+ self.state = state
+ self.mapper = state.mapper.base_mapper
+ self.sort_key = ("DeleteState", self.mapper._sort_key)
+
+ @util.preload_module("sqlalchemy.orm.persistence")
+ def execute_aggregate(self, uow, recs):
+ persistence = util.preloaded.orm_persistence
+ cls_ = self.__class__
+ mapper = self.mapper
+ our_recs = [
+ r for r in recs if r.__class__ is cls_ and r.mapper is mapper
+ ]
+ recs.difference_update(our_recs)
+ states = [self.state] + [r.state for r in our_recs]
+ persistence.delete_obj(
+ mapper, [s for s in states if uow.states[s][0]], uow
+ )
+
+ def __repr__(self):
+ return "%s(%s)" % (
+ self.__class__.__name__,
+ orm_util.state_str(self.state),
+ )
diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py
new file mode 100644
index 0000000..56aa9ff
--- /dev/null
+++ b/lib/sqlalchemy/orm/util.py
@@ -0,0 +1,2149 @@
+# orm/util.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+
+import re
+import types
+import weakref
+
+from . import attributes # noqa
+from .base import _class_to_mapper # noqa
+from .base import _never_set # noqa
+from .base import _none_set # noqa
+from .base import attribute_str # noqa
+from .base import class_mapper # noqa
+from .base import InspectionAttr # noqa
+from .base import instance_str # noqa
+from .base import object_mapper # noqa
+from .base import object_state # noqa
+from .base import state_attribute_str # noqa
+from .base import state_class_str # noqa
+from .base import state_str # noqa
+from .interfaces import CriteriaOption
+from .interfaces import MapperProperty # noqa
+from .interfaces import ORMColumnsClauseRole
+from .interfaces import ORMEntityColumnsClauseRole
+from .interfaces import ORMFromClauseRole
+from .interfaces import PropComparator # noqa
+from .path_registry import PathRegistry # noqa
+from .. import event
+from .. import exc as sa_exc
+from .. import inspection
+from .. import sql
+from .. import util
+from ..engine.result import result_tuple
+from ..sql import base as sql_base
+from ..sql import coercions
+from ..sql import expression
+from ..sql import lambdas
+from ..sql import roles
+from ..sql import util as sql_util
+from ..sql import visitors
+from ..sql.annotation import SupportsCloneAnnotations
+from ..sql.base import ColumnCollection
+
+
+all_cascades = frozenset(
+ (
+ "delete",
+ "delete-orphan",
+ "all",
+ "merge",
+ "expunge",
+ "save-update",
+ "refresh-expire",
+ "none",
+ )
+)
+
+
+class CascadeOptions(frozenset):
+ """Keeps track of the options sent to
+ :paramref:`.relationship.cascade`"""
+
+ _add_w_all_cascades = all_cascades.difference(
+ ["all", "none", "delete-orphan"]
+ )
+ _allowed_cascades = all_cascades
+
+ _viewonly_cascades = ["expunge", "all", "none", "refresh-expire"]
+
+ __slots__ = (
+ "save_update",
+ "delete",
+ "refresh_expire",
+ "merge",
+ "expunge",
+ "delete_orphan",
+ )
+
+ def __new__(cls, value_list):
+ if isinstance(value_list, util.string_types) or value_list is None:
+ return cls.from_string(value_list)
+ values = set(value_list)
+ if values.difference(cls._allowed_cascades):
+ raise sa_exc.ArgumentError(
+ "Invalid cascade option(s): %s"
+ % ", ".join(
+ [
+ repr(x)
+ for x in sorted(
+ values.difference(cls._allowed_cascades)
+ )
+ ]
+ )
+ )
+
+ if "all" in values:
+ values.update(cls._add_w_all_cascades)
+ if "none" in values:
+ values.clear()
+ values.discard("all")
+
+ self = frozenset.__new__(CascadeOptions, values)
+ self.save_update = "save-update" in values
+ self.delete = "delete" in values
+ self.refresh_expire = "refresh-expire" in values
+ self.merge = "merge" in values
+ self.expunge = "expunge" in values
+ self.delete_orphan = "delete-orphan" in values
+
+ if self.delete_orphan and not self.delete:
+ util.warn(
+ "The 'delete-orphan' cascade " "option requires 'delete'."
+ )
+ return self
+
+ def __repr__(self):
+ return "CascadeOptions(%r)" % (",".join([x for x in sorted(self)]))
+
+ @classmethod
+ def from_string(cls, arg):
+ values = [c for c in re.split(r"\s*,\s*", arg or "") if c]
+ return cls(values)
+
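+# A minimal usage sketch (illustrative only; in practice relationship()
+# constructs CascadeOptions from its "cascade" argument):
+#
+#     opts = CascadeOptions("all, delete-orphan")
+#     assert opts.save_update and opts.merge and opts.delete_orphan
+#     assert "delete" in opts  # frozenset membership is preserved
+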
+
+def _validator_events(desc, key, validator, include_removes, include_backrefs):
+ """Runs a validation method on an attribute value to be set or
+ appended.
+ """
+
+ if not include_backrefs:
+
+ def detect_is_backref(state, initiator):
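+            # an event fired from the other side of a backref carries the
+            # other attribute's impl as its initiator; return True so the
+            # validator can be skipped for backref-driven events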
+ impl = state.manager[key].impl
+ return initiator.impl is not impl
+
+ if include_removes:
+
+ def append(state, value, initiator):
+ if initiator.op is not attributes.OP_BULK_REPLACE and (
+ include_backrefs or not detect_is_backref(state, initiator)
+ ):
+ return validator(state.obj(), key, value, False)
+ else:
+ return value
+
+ def bulk_set(state, values, initiator):
+ if include_backrefs or not detect_is_backref(state, initiator):
+ obj = state.obj()
+ values[:] = [
+ validator(obj, key, value, False) for value in values
+ ]
+
+ def set_(state, value, oldvalue, initiator):
+ if include_backrefs or not detect_is_backref(state, initiator):
+ return validator(state.obj(), key, value, False)
+ else:
+ return value
+
+ def remove(state, value, initiator):
+ if include_backrefs or not detect_is_backref(state, initiator):
+ validator(state.obj(), key, value, True)
+
+ else:
+
+ def append(state, value, initiator):
+ if initiator.op is not attributes.OP_BULK_REPLACE and (
+ include_backrefs or not detect_is_backref(state, initiator)
+ ):
+ return validator(state.obj(), key, value)
+ else:
+ return value
+
+ def bulk_set(state, values, initiator):
+ if include_backrefs or not detect_is_backref(state, initiator):
+ obj = state.obj()
+ values[:] = [validator(obj, key, value) for value in values]
+
+ def set_(state, value, oldvalue, initiator):
+ if include_backrefs or not detect_is_backref(state, initiator):
+ return validator(state.obj(), key, value)
+ else:
+ return value
+
+ event.listen(desc, "append", append, raw=True, retval=True)
+ event.listen(desc, "bulk_replace", bulk_set, raw=True)
+ event.listen(desc, "set", set_, raw=True, retval=True)
+ if include_removes:
+ event.listen(desc, "remove", remove, raw=True, retval=True)
+
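+# Wiring sketch: this helper backs the @validates() decorator.  For a
+# hypothetical mapped class, the listeners installed above would invoke:
+#
+#     class User(Base):
+#         # ...
+#         @validates("addresses", include_removes=True)
+#         def validate_address(self, key, value, is_remove):
+#             # called for appends and removes alike
+#             return value
+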
+
+def polymorphic_union(
+ table_map, typecolname, aliasname="p_union", cast_nulls=True
+):
+ """Create a ``UNION`` statement used by a polymorphic mapper.
+
+ See :ref:`concrete_inheritance` for an example of how
+ this is used.
+
+ :param table_map: mapping of polymorphic identities to
+ :class:`_schema.Table` objects.
+ :param typecolname: string name of a "discriminator" column, which will be
+ derived from the query, producing the polymorphic identity for
+ each row. If ``None``, no polymorphic discriminator is generated.
+ :param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()`
+ construct generated.
+ :param cast_nulls: if True, non-existent columns, which are represented
+ as labeled NULLs, will be passed into CAST. This is a legacy behavior
+ that is problematic on some backends such as Oracle - in which case it
+ can be set to False.
+
+ """
+
+ colnames = util.OrderedSet()
+ colnamemaps = {}
+ types = {}
+ for key in table_map:
+ table = table_map[key]
+
+ table = coercions.expect(
+ roles.StrictFromClauseRole, table, allow_select=True
+ )
+ table_map[key] = table
+
+ m = {}
+ for c in table.c:
+ if c.key == typecolname:
+ raise sa_exc.InvalidRequestError(
+ "Polymorphic union can't use '%s' as the discriminator "
+ "column due to mapped column %r; please apply the "
+ "'typecolname' "
+ "argument; this is available on "
+ "ConcreteBase as '_concrete_discriminator_name'"
+ % (typecolname, c)
+ )
+ colnames.add(c.key)
+ m[c.key] = c
+ types[c.key] = c.type
+ colnamemaps[table] = m
+
+ def col(name, table):
+ try:
+ return colnamemaps[table][name]
+ except KeyError:
+ if cast_nulls:
+ return sql.cast(sql.null(), types[name]).label(name)
+ else:
+ return sql.type_coerce(sql.null(), types[name]).label(name)
+
+ result = []
+ for type_, table in table_map.items():
+ if typecolname is not None:
+ result.append(
+ sql.select(
+ *(
+ [col(name, table) for name in colnames]
+ + [
+ sql.literal_column(
+ sql_util._quote_ddl_expr(type_)
+ ).label(typecolname)
+ ]
+ )
+ ).select_from(table)
+ )
+ else:
+ result.append(
+ sql.select(
+ *[col(name, table) for name in colnames]
+ ).select_from(table)
+ )
+ return sql.union_all(*result).alias(aliasname)
+
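+# Illustrative sketch (table names are hypothetical): given two concrete
+# tables, the UNION ALL gains a literal discriminator labeled typecolname:
+#
+#     pjoin = polymorphic_union(
+#         {"engineer": engineers_table, "manager": managers_table},
+#         "type",
+#         "pjoin",
+#     )
+#     # SELECT ..., 'engineer' AS type FROM engineers
+#     # UNION ALL SELECT ..., 'manager' AS type FROM managers
+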
+
+def identity_key(*args, **kwargs):
+ r"""Generate "identity key" tuples, as are used as keys in the
+ :attr:`.Session.identity_map` dictionary.
+
+ This function has several call styles:
+
+ * ``identity_key(class, ident, identity_token=token)``
+
+ This form receives a mapped class and a primary key scalar or
+ tuple as an argument.
+
+ E.g.::
+
+ >>> identity_key(MyClass, (1, 2))
+ (<class '__main__.MyClass'>, (1, 2), None)
+
+ :param class: mapped class (must be a positional argument)
+ :param ident: primary key, may be a scalar or tuple argument.
+ :param identity_token: optional identity token
+
+ .. versionadded:: 1.2 added identity_token
+
+
+ * ``identity_key(instance=instance)``
+
+      This form will produce the identity key for a given instance.  The
+      instance need not be persistent; it only needs its primary key
+      attributes populated (else the key will contain ``None`` for those
+      missing values).
+
+ E.g.::
+
+ >>> instance = MyClass(1, 2)
+ >>> identity_key(instance=instance)
+ (<class '__main__.MyClass'>, (1, 2), None)
+
+    In this form, the given instance is ultimately run through
+ :meth:`_orm.Mapper.identity_key_from_instance`, which will have the
+ effect of performing a database check for the corresponding row
+ if the object is expired.
+
+ :param instance: object instance (must be given as a keyword arg)
+
+ * ``identity_key(class, row=row, identity_token=token)``
+
+ This form is similar to the class/tuple form, except is passed a
+ database result row as a :class:`.Row` object.
+
+ E.g.::
+
+ >>> row = engine.execute(\
+ text("select * from table where a=1 and b=2")\
+ ).first()
+ >>> identity_key(MyClass, row=row)
+ (<class '__main__.MyClass'>, (1, 2), None)
+
+ :param class: mapped class (must be a positional argument)
+ :param row: :class:`.Row` row returned by a :class:`_engine.CursorResult`
+ (must be given as a keyword arg)
+ :param identity_token: optional identity token
+
+ .. versionadded:: 1.2 added identity_token
+
+ """
+ if args:
+ row = None
+ largs = len(args)
+ if largs == 1:
+ class_ = args[0]
+ try:
+ row = kwargs.pop("row")
+ except KeyError:
+ ident = kwargs.pop("ident")
+ elif largs in (2, 3):
+ class_, ident = args
+ else:
+ raise sa_exc.ArgumentError(
+ "expected up to three positional arguments, " "got %s" % largs
+ )
+
+ identity_token = kwargs.pop("identity_token", None)
+ if kwargs:
+ raise sa_exc.ArgumentError(
+ "unknown keyword arguments: %s" % ", ".join(kwargs)
+ )
+ mapper = class_mapper(class_)
+ if row is None:
+ return mapper.identity_key_from_primary_key(
+ util.to_list(ident), identity_token=identity_token
+ )
+ else:
+ return mapper.identity_key_from_row(
+ row, identity_token=identity_token
+ )
+ else:
+ instance = kwargs.pop("instance")
+ if kwargs:
+ raise sa_exc.ArgumentError(
+ "unknown keyword arguments: %s" % ", ".join(kwargs.keys)
+ )
+ mapper = object_mapper(instance)
+ return mapper.identity_key_from_instance(instance)
+
+
+class ORMAdapter(sql_util.ColumnAdapter):
+ """ColumnAdapter subclass which excludes adaptation of entities from
+ non-matching mappers.
+
+ """
+
+ def __init__(
+ self,
+ entity,
+ equivalents=None,
+ adapt_required=False,
+ allow_label_resolve=True,
+ anonymize_labels=False,
+ ):
+ info = inspection.inspect(entity)
+
+ self.mapper = info.mapper
+ selectable = info.selectable
+ is_aliased_class = info.is_aliased_class
+ if is_aliased_class:
+ self.aliased_class = entity
+ else:
+ self.aliased_class = None
+
+ sql_util.ColumnAdapter.__init__(
+ self,
+ selectable,
+ equivalents,
+ adapt_required=adapt_required,
+ allow_label_resolve=allow_label_resolve,
+ anonymize_labels=anonymize_labels,
+ include_fn=self._include_fn,
+ )
+
+ def _include_fn(self, elem):
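+        # adapt only columns annotated as belonging to this mapper's
+        # inheritance hierarchy (or carrying no entity annotation at all)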
+ entity = elem._annotations.get("parentmapper", None)
+
+ return not entity or entity.isa(self.mapper) or self.mapper.isa(entity)
+
+
+class AliasedClass(object):
+ r"""Represents an "aliased" form of a mapped class for usage with Query.
+
+ The ORM equivalent of a :func:`~sqlalchemy.sql.expression.alias`
+ construct, this object mimics the mapped class using a
+ ``__getattr__`` scheme and maintains a reference to a
+ real :class:`~sqlalchemy.sql.expression.Alias` object.
+
+ A primary purpose of :class:`.AliasedClass` is to serve as an alternate
+ within a SQL statement generated by the ORM, such that an existing
+ mapped entity can be used in multiple contexts. A simple example::
+
+ # find all pairs of users with the same name
+ user_alias = aliased(User)
+ session.query(User, user_alias).\
+ join((user_alias, User.id > user_alias.id)).\
+ filter(User.name == user_alias.name)
+
+ :class:`.AliasedClass` is also capable of mapping an existing mapped
+ class to an entirely new selectable, provided this selectable is column-
+ compatible with the existing mapped selectable, and it can also be
+ configured in a mapping as the target of a :func:`_orm.relationship`.
+ See the links below for examples.
+
+ The :class:`.AliasedClass` object is constructed typically using the
+ :func:`_orm.aliased` function. It also is produced with additional
+ configuration when using the :func:`_orm.with_polymorphic` function.
+
+ The resulting object is an instance of :class:`.AliasedClass`.
+ This object implements an attribute scheme which produces the
+ same attribute and method interface as the original mapped
+ class, allowing :class:`.AliasedClass` to be compatible
+ with any attribute technique which works on the original class,
+ including hybrid attributes (see :ref:`hybrids_toplevel`).
+
+ The :class:`.AliasedClass` can be inspected for its underlying
+ :class:`_orm.Mapper`, aliased selectable, and other information
+ using :func:`_sa.inspect`::
+
+ from sqlalchemy import inspect
+ my_alias = aliased(MyClass)
+ insp = inspect(my_alias)
+
+ The resulting inspection object is an instance of :class:`.AliasedInsp`.
+
+
+ .. seealso::
+
+ :func:`.aliased`
+
+ :func:`.with_polymorphic`
+
+ :ref:`relationship_aliased_class`
+
+ :ref:`relationship_to_window_function`
+
+
+ """
+
+ def __init__(
+ self,
+ mapped_class_or_ac,
+ alias=None,
+ name=None,
+ flat=False,
+ adapt_on_names=False,
+ # TODO: None for default here?
+ with_polymorphic_mappers=(),
+ with_polymorphic_discriminator=None,
+ base_alias=None,
+ use_mapper_path=False,
+ represents_outer_join=False,
+ ):
+ insp = inspection.inspect(mapped_class_or_ac)
+ mapper = insp.mapper
+
+ nest_adapters = False
+
+ if alias is None:
+ if insp.is_aliased_class and insp.selectable._is_subquery:
+ alias = insp.selectable.alias()
+ else:
+ alias = (
+ mapper._with_polymorphic_selectable._anonymous_fromclause(
+ name=name,
+ flat=flat,
+ )
+ )
+ elif insp.is_aliased_class:
+ nest_adapters = True
+
+ self._aliased_insp = AliasedInsp(
+ self,
+ insp,
+ alias,
+ name,
+ with_polymorphic_mappers
+ if with_polymorphic_mappers
+ else mapper.with_polymorphic_mappers,
+ with_polymorphic_discriminator
+ if with_polymorphic_discriminator is not None
+ else mapper.polymorphic_on,
+ base_alias,
+ use_mapper_path,
+ adapt_on_names,
+ represents_outer_join,
+ nest_adapters,
+ )
+
+ self.__name__ = "AliasedClass_%s" % mapper.class_.__name__
+
+ @classmethod
+ def _reconstitute_from_aliased_insp(cls, aliased_insp):
+ obj = cls.__new__(cls)
+ obj.__name__ = "AliasedClass_%s" % aliased_insp.mapper.class_.__name__
+ obj._aliased_insp = aliased_insp
+
+ if aliased_insp._is_with_polymorphic:
+ for sub_aliased_insp in aliased_insp._with_polymorphic_entities:
+ if sub_aliased_insp is not aliased_insp:
+ ent = AliasedClass._reconstitute_from_aliased_insp(
+ sub_aliased_insp
+ )
+ setattr(obj, sub_aliased_insp.class_.__name__, ent)
+
+ return obj
+
+ def __getattr__(self, key):
+ try:
+ _aliased_insp = self.__dict__["_aliased_insp"]
+ except KeyError:
+ raise AttributeError()
+ else:
+ target = _aliased_insp._target
+ # maintain all getattr mechanics
+ attr = getattr(target, key)
+
+            # attribute is a method that will be invoked against a
+            # "self"; so just return a new method with the same function
+            # and a new self
+ if hasattr(attr, "__call__") and hasattr(attr, "__self__"):
+ return types.MethodType(attr.__func__, self)
+
+            # attribute is a descriptor that will be invoked against a
+            # "self"; so invoke the descriptor against this self
+ if hasattr(attr, "__get__"):
+ attr = attr.__get__(None, self)
+
+ # attributes within the QueryableAttribute system will want this
+ # to be invoked so the object can be adapted
+ if hasattr(attr, "adapt_to_entity"):
+ attr = attr.adapt_to_entity(_aliased_insp)
+ setattr(self, key, attr)
+
+ return attr
+
+ def _get_from_serialized(self, key, mapped_class, aliased_insp):
+ # this method is only used in terms of the
+ # sqlalchemy.ext.serializer extension
+ attr = getattr(mapped_class, key)
+ if hasattr(attr, "__call__") and hasattr(attr, "__self__"):
+ return types.MethodType(attr.__func__, self)
+
+        # attribute is a descriptor that will be invoked against a
+        # "self"; so invoke the descriptor against this self
+ if hasattr(attr, "__get__"):
+ attr = attr.__get__(None, self)
+
+ # attributes within the QueryableAttribute system will want this
+ # to be invoked so the object can be adapted
+ if hasattr(attr, "adapt_to_entity"):
+ aliased_insp._weak_entity = weakref.ref(self)
+ attr = attr.adapt_to_entity(aliased_insp)
+ setattr(self, key, attr)
+
+ return attr
+
+ def __repr__(self):
+ return "<AliasedClass at 0x%x; %s>" % (
+ id(self),
+ self._aliased_insp._target.__name__,
+ )
+
+ def __str__(self):
+ return str(self._aliased_insp)
+
+
+class AliasedInsp(
+ ORMEntityColumnsClauseRole,
+ ORMFromClauseRole,
+ sql_base.MemoizedHasCacheKey,
+ InspectionAttr,
+):
+ """Provide an inspection interface for an
+ :class:`.AliasedClass` object.
+
+ The :class:`.AliasedInsp` object is returned
+ given an :class:`.AliasedClass` using the
+ :func:`_sa.inspect` function::
+
+ from sqlalchemy import inspect
+ from sqlalchemy.orm import aliased
+
+ my_alias = aliased(MyMappedClass)
+ insp = inspect(my_alias)
+
+ Attributes on :class:`.AliasedInsp`
+ include:
+
+ * ``entity`` - the :class:`.AliasedClass` represented.
+ * ``mapper`` - the :class:`_orm.Mapper` mapping the underlying class.
+ * ``selectable`` - the :class:`_expression.Alias`
+ construct which ultimately
+ represents an aliased :class:`_schema.Table` or
+ :class:`_expression.Select`
+ construct.
+ * ``name`` - the name of the alias. Also is used as the attribute
+ name when returned in a result tuple from :class:`_query.Query`.
+ * ``with_polymorphic_mappers`` - collection of :class:`_orm.Mapper`
+ objects
+ indicating all those mappers expressed in the select construct
+ for the :class:`.AliasedClass`.
+ * ``polymorphic_on`` - an alternate column or SQL expression which
+ will be used as the "discriminator" for a polymorphic load.
+
+ .. seealso::
+
+ :ref:`inspection_toplevel`
+
+ """
+
+ def __init__(
+ self,
+ entity,
+ inspected,
+ selectable,
+ name,
+ with_polymorphic_mappers,
+ polymorphic_on,
+ _base_alias,
+ _use_mapper_path,
+ adapt_on_names,
+ represents_outer_join,
+ nest_adapters,
+ ):
+
+ mapped_class_or_ac = inspected.entity
+ mapper = inspected.mapper
+
+ self._weak_entity = weakref.ref(entity)
+ self.mapper = mapper
+ self.selectable = (
+ self.persist_selectable
+ ) = self.local_table = selectable
+ self.name = name
+ self.polymorphic_on = polymorphic_on
+ self._base_alias = weakref.ref(_base_alias or self)
+ self._use_mapper_path = _use_mapper_path
+ self.represents_outer_join = represents_outer_join
+ self._nest_adapters = nest_adapters
+
+ if with_polymorphic_mappers:
+ self._is_with_polymorphic = True
+ self.with_polymorphic_mappers = with_polymorphic_mappers
+ self._with_polymorphic_entities = []
+ for poly in self.with_polymorphic_mappers:
+ if poly is not mapper:
+ ent = AliasedClass(
+ poly.class_,
+ selectable,
+ base_alias=self,
+ adapt_on_names=adapt_on_names,
+ use_mapper_path=_use_mapper_path,
+ )
+
+ setattr(self.entity, poly.class_.__name__, ent)
+ self._with_polymorphic_entities.append(ent._aliased_insp)
+
+ else:
+ self._is_with_polymorphic = False
+ self.with_polymorphic_mappers = [mapper]
+
+ self._adapter = sql_util.ColumnAdapter(
+ selectable,
+ equivalents=mapper._equivalent_columns,
+ adapt_on_names=adapt_on_names,
+ anonymize_labels=True,
+ # make sure the adapter doesn't try to grab other tables that
+ # are not even the thing we are mapping, such as embedded
+ # selectables in subqueries or CTEs. See issue #6060
+ adapt_from_selectables={
+ m.selectable
+ for m in self.with_polymorphic_mappers
+ if not adapt_on_names
+ },
+ )
+
+ if nest_adapters:
+ self._adapter = inspected._adapter.wrap(self._adapter)
+
+ self._adapt_on_names = adapt_on_names
+ self._target = mapped_class_or_ac
+ # self._target = mapper.class_ # mapped_class_or_ac
+
+ @property
+ def entity(self):
+ # to eliminate reference cycles, the AliasedClass is held weakly.
+ # this produces some situations where the AliasedClass gets lost,
+ # particularly when one is created internally and only the AliasedInsp
+ # is passed around.
+ # to work around this case, we just generate a new one when we need
+ # it, as it is a simple class with very little initial state on it.
+ ent = self._weak_entity()
+ if ent is None:
+ ent = AliasedClass._reconstitute_from_aliased_insp(self)
+ self._weak_entity = weakref.ref(ent)
+ return ent
+
+ is_aliased_class = True
+ "always returns True"
+
+ @util.memoized_instancemethod
+ def __clause_element__(self):
+ return self.selectable._annotate(
+ {
+ "parentmapper": self.mapper,
+ "parententity": self,
+ "entity_namespace": self,
+ }
+ )._set_propagate_attrs(
+ {"compile_state_plugin": "orm", "plugin_subject": self}
+ )
+
+ @property
+ def entity_namespace(self):
+ return self.entity
+
+ _cache_key_traversal = [
+ ("name", visitors.ExtendedInternalTraversal.dp_string),
+ ("_adapt_on_names", visitors.ExtendedInternalTraversal.dp_boolean),
+ ("selectable", visitors.ExtendedInternalTraversal.dp_clauseelement),
+ ]
+
+ @property
+ def class_(self):
+ """Return the mapped class ultimately represented by this
+ :class:`.AliasedInsp`."""
+ return self.mapper.class_
+
+ @property
+ def _path_registry(self):
+ if self._use_mapper_path:
+ return self.mapper._path_registry
+ else:
+ return PathRegistry.per_mapper(self)
+
+ def __getstate__(self):
+ return {
+ "entity": self.entity,
+ "mapper": self.mapper,
+ "alias": self.selectable,
+ "name": self.name,
+ "adapt_on_names": self._adapt_on_names,
+ "with_polymorphic_mappers": self.with_polymorphic_mappers,
+ "with_polymorphic_discriminator": self.polymorphic_on,
+ "base_alias": self._base_alias(),
+ "use_mapper_path": self._use_mapper_path,
+ "represents_outer_join": self.represents_outer_join,
+ "nest_adapters": self._nest_adapters,
+ }
+
+ def __setstate__(self, state):
+ self.__init__(
+ state["entity"],
+ state["mapper"],
+ state["alias"],
+ state["name"],
+ state["with_polymorphic_mappers"],
+ state["with_polymorphic_discriminator"],
+ state["base_alias"],
+ state["use_mapper_path"],
+ state["adapt_on_names"],
+ state["represents_outer_join"],
+ state["nest_adapters"],
+ )
+
+ def _adapt_element(self, elem, key=None):
+ d = {
+ "parententity": self,
+ "parentmapper": self.mapper,
+ }
+ if key:
+ d["proxy_key"] = key
+ return (
+ self._adapter.traverse(elem)
+ ._annotate(d)
+ ._set_propagate_attrs(
+ {"compile_state_plugin": "orm", "plugin_subject": self}
+ )
+ )
+
+ def _entity_for_mapper(self, mapper):
+ self_poly = self.with_polymorphic_mappers
+ if mapper in self_poly:
+ if mapper is self.mapper:
+ return self
+ else:
+ return getattr(
+ self.entity, mapper.class_.__name__
+ )._aliased_insp
+ elif mapper.isa(self.mapper):
+ return self
+ else:
+ assert False, "mapper %s doesn't correspond to %s" % (mapper, self)
+
+ @util.memoized_property
+ def _get_clause(self):
+ onclause, replacemap = self.mapper._get_clause
+ return (
+ self._adapter.traverse(onclause),
+ {
+ self._adapter.traverse(col): param
+ for col, param in replacemap.items()
+ },
+ )
+
+ @util.memoized_property
+ def _memoized_values(self):
+ return {}
+
+ @util.memoized_property
+ def _all_column_expressions(self):
+ if self._is_with_polymorphic:
+ cols_plus_keys = self.mapper._columns_plus_keys(
+ [ent.mapper for ent in self._with_polymorphic_entities]
+ )
+ else:
+ cols_plus_keys = self.mapper._columns_plus_keys()
+
+ cols_plus_keys = [
+ (key, self._adapt_element(col)) for key, col in cols_plus_keys
+ ]
+
+ return ColumnCollection(cols_plus_keys)
+
+ def _memo(self, key, callable_, *args, **kw):
+ if key in self._memoized_values:
+ return self._memoized_values[key]
+ else:
+ self._memoized_values[key] = value = callable_(*args, **kw)
+ return value
+
+ def __repr__(self):
+ if self.with_polymorphic_mappers:
+ with_poly = "(%s)" % ", ".join(
+ mp.class_.__name__ for mp in self.with_polymorphic_mappers
+ )
+ else:
+ with_poly = ""
+ return "<AliasedInsp at 0x%x; %s%s>" % (
+ id(self),
+ self.class_.__name__,
+ with_poly,
+ )
+
+ def __str__(self):
+ if self._is_with_polymorphic:
+ return "with_polymorphic(%s, [%s])" % (
+ self._target.__name__,
+ ", ".join(
+ mp.class_.__name__
+ for mp in self.with_polymorphic_mappers
+ if mp is not self.mapper
+ ),
+ )
+ else:
+ return "aliased(%s)" % (self._target.__name__,)
+
+
+class _WrapUserEntity(object):
+ """A wrapper used within the loader_criteria lambda caller so that
+ we can bypass declared_attr descriptors on unmapped mixins, which
+ normally emit a warning for such use.
+
+    Might also be useful for other per-lambda instrumentations, should
+    the need arise.
+
+ """
+
+ __slots__ = ("subject",)
+
+ def __init__(self, subject):
+ self.subject = subject
+
+ @util.preload_module("sqlalchemy.orm.decl_api")
+ def __getattribute__(self, name):
+ decl_api = util.preloaded.orm.decl_api
+
+ subject = object.__getattribute__(self, "subject")
+ if name in subject.__dict__ and isinstance(
+ subject.__dict__[name], decl_api.declared_attr
+ ):
+ return subject.__dict__[name].fget(subject)
+ else:
+ return getattr(subject, name)
+
+
+class LoaderCriteriaOption(CriteriaOption):
+ """Add additional WHERE criteria to the load for all occurrences of
+ a particular entity.
+
+ :class:`_orm.LoaderCriteriaOption` is invoked using the
+ :func:`_orm.with_loader_criteria` function; see that function for
+ details.
+
+ .. versionadded:: 1.4
+
+ """
+
+ _traverse_internals = [
+ ("root_entity", visitors.ExtendedInternalTraversal.dp_plain_obj),
+ ("entity", visitors.ExtendedInternalTraversal.dp_has_cache_key),
+ ("where_criteria", visitors.InternalTraversal.dp_clauseelement),
+ ("include_aliases", visitors.InternalTraversal.dp_boolean),
+ ("propagate_to_loaders", visitors.InternalTraversal.dp_boolean),
+ ]
+
+ def __init__(
+ self,
+ entity_or_base,
+ where_criteria,
+ loader_only=False,
+ include_aliases=False,
+ propagate_to_loaders=True,
+ track_closure_variables=True,
+ ):
+ """Add additional WHERE criteria to the load for all occurrences of
+ a particular entity.
+
+ .. versionadded:: 1.4
+
+ The :func:`_orm.with_loader_criteria` option is intended to add
+ limiting criteria to a particular kind of entity in a query,
+ **globally**, meaning it will apply to the entity as it appears
+ in the SELECT query as well as within any subqueries, join
+ conditions, and relationship loads, including both eager and lazy
+ loaders, without the need for it to be specified in any particular
+ part of the query. The rendering logic uses the same system used by
+ single table inheritance to ensure a certain discriminator is applied
+ to a table.
+
+ E.g., using :term:`2.0-style` queries, we can limit the way the
+ ``User.addresses`` collection is loaded, regardless of the kind
+ of loading used::
+
+ from sqlalchemy.orm import with_loader_criteria
+
+ stmt = select(User).options(
+ selectinload(User.addresses),
+                with_loader_criteria(Address, Address.email_address != 'foo')
+            )
+
+ Above, the "selectinload" for ``User.addresses`` will apply the
+ given filtering criteria to the WHERE clause.
+
+ Another example, where the filtering will be applied to the
+ ON clause of the join, in this example using :term:`1.x style`
+ queries::
+
+ q = session.query(User).outerjoin(User.addresses).options(
+                with_loader_criteria(Address, Address.email_address != 'foo')
+            )
+
+ The primary purpose of :func:`_orm.with_loader_criteria` is to use
+ it in the :meth:`_orm.SessionEvents.do_orm_execute` event handler
+ to ensure that all occurrences of a particular entity are filtered
+ in a certain way, such as filtering for access control roles. It
+ also can be used to apply criteria to relationship loads. In the
+ example below, we can apply a certain set of rules to all queries
+ emitted by a particular :class:`_orm.Session`::
+
+ session = Session(bind=engine)
+
+            @event.listens_for(session, "do_orm_execute")
+ def _add_filtering_criteria(execute_state):
+
+ if (
+ execute_state.is_select
+ and not execute_state.is_column_load
+ and not execute_state.is_relationship_load
+ ):
+ execute_state.statement = execute_state.statement.options(
+ with_loader_criteria(
+ SecurityRole,
+ lambda cls: cls.role.in_(['some_role']),
+ include_aliases=True
+ )
+ )
+
+ In the above example, the :meth:`_orm.SessionEvents.do_orm_execute`
+ event will intercept all queries emitted using the
+ :class:`_orm.Session`. For those queries which are SELECT statements
+ and are not attribute or relationship loads a custom
+ :func:`_orm.with_loader_criteria` option is added to the query. The
+ :func:`_orm.with_loader_criteria` option will be used in the given
+ statement and will also be automatically propagated to all relationship
+ loads that descend from this query.
+
+ The criteria argument given is a ``lambda`` that accepts a ``cls``
+        argument. The given class will expand to include all mapped subclasses
+ and need not itself be a mapped class.
+
+ .. tip::
+
+ When using :func:`_orm.with_loader_criteria` option in
+ conjunction with the :func:`_orm.contains_eager` loader option,
+ it's important to note that :func:`_orm.with_loader_criteria` only
+ affects the part of the query that determines what SQL is rendered
+ in terms of the WHERE and FROM clauses. The
+ :func:`_orm.contains_eager` option does not affect the rendering of
+ the SELECT statement outside of the columns clause, so does not have
+ any interaction with the :func:`_orm.with_loader_criteria` option.
+ However, the way things "work" is that :func:`_orm.contains_eager`
+ is meant to be used with a query that is already selecting from the
+ additional entities in some way, where
+            :func:`_orm.with_loader_criteria` can apply its additional
+ criteria.
+
+ In the example below, assuming a mapping relationship as
+ ``A -> A.bs -> B``, the given :func:`_orm.with_loader_criteria`
+ option will affect the way in which the JOIN is rendered::
+
+ stmt = select(A).join(A.bs).options(
+ contains_eager(A.bs),
+ with_loader_criteria(B, B.flag == 1)
+ )
+
+ Above, the given :func:`_orm.with_loader_criteria` option will
+ affect the ON clause of the JOIN that is specified by
+ ``.join(A.bs)``, so is applied as expected. The
+ :func:`_orm.contains_eager` option has the effect that columns from
+ ``B`` are added to the columns clause::
+
+ SELECT
+ b.id, b.a_id, b.data, b.flag,
+ a.id AS id_1,
+ a.data AS data_1
+ FROM a JOIN b ON a.id = b.a_id AND b.flag = :flag_1
+
+
+ The use of the :func:`_orm.contains_eager` option within the above
+ statement has no effect on the behavior of the
+ :func:`_orm.with_loader_criteria` option. If the
+ :func:`_orm.contains_eager` option were omitted, the SQL would be
+ the same as regards the FROM and WHERE clauses, where
+ :func:`_orm.with_loader_criteria` continues to add its criteria to
+ the ON clause of the JOIN. The addition of
+ :func:`_orm.contains_eager` only affects the columns clause, in that
+ additional columns against ``b`` are added which are then consumed
+ by the ORM to produce ``B`` instances.
+
+ .. warning:: The use of a lambda inside of the call to
+ :func:`_orm.with_loader_criteria` is only invoked **once per unique
+ class**. Custom functions should not be invoked within this lambda.
+ See :ref:`engine_lambda_caching` for an overview of the "lambda SQL"
+ feature, which is for advanced use only.
+
+ :param entity_or_base: a mapped class, or a class that is a super
+ class of a particular set of mapped classes, to which the rule
+ will apply.
+
+ :param where_criteria: a Core SQL expression that applies limiting
+ criteria. This may also be a "lambda:" or Python function that
+ accepts a target class as an argument, when the given class is
+ a base with many different mapped subclasses.
+
+ .. note:: To support pickling, use a module-level Python function to
+ produce the SQL expression instead of a lambda or a fixed SQL
+            expression, which tend not to be picklable.
+
+ :param include_aliases: if True, apply the rule to :func:`_orm.aliased`
+ constructs as well.
+
+        :param propagate_to_loaders: defaults to True; apply to relationship
+          loaders such as lazy loaders.  This indicates that the option
+          object itself, including its SQL expression, is carried along with
+          each loaded instance.  Set to ``False`` to prevent the object from
+ being assigned to individual instances.
+
+ .. seealso::
+
+ :ref:`examples_session_orm_events` - includes examples of using
+ :func:`_orm.with_loader_criteria`.
+
+ :ref:`do_orm_execute_global_criteria` - basic example on how to
+ combine :func:`_orm.with_loader_criteria` with the
+ :meth:`_orm.SessionEvents.do_orm_execute` event.
+
+ :param track_closure_variables: when False, closure variables inside
+ of a lambda expression will not be used as part of
+ any cache key. This allows more complex expressions to be used
+ inside of a lambda expression but requires that the lambda ensures
+ it returns the identical SQL every time given a particular class.
+
+ .. versionadded:: 1.4.0b2
+
+ """
+ entity = inspection.inspect(entity_or_base, False)
+ if entity is None:
+ self.root_entity = entity_or_base
+ self.entity = None
+ else:
+ self.root_entity = None
+ self.entity = entity
+
+ self._where_crit_orig = where_criteria
+ if callable(where_criteria):
+ self.deferred_where_criteria = True
+ self.where_criteria = lambdas.DeferredLambdaElement(
+ where_criteria,
+ roles.WhereHavingRole,
+ lambda_args=(
+ _WrapUserEntity(
+ self.root_entity
+ if self.root_entity is not None
+ else self.entity.entity,
+ ),
+ ),
+ opts=lambdas.LambdaOptions(
+ track_closure_variables=track_closure_variables
+ ),
+ )
+ else:
+ self.deferred_where_criteria = False
+ self.where_criteria = coercions.expect(
+ roles.WhereHavingRole, where_criteria
+ )
+
+ self.include_aliases = include_aliases
+ self.propagate_to_loaders = propagate_to_loaders
+
+ @classmethod
+ def _unreduce(
+ cls, entity, where_criteria, include_aliases, propagate_to_loaders
+ ):
+ return LoaderCriteriaOption(
+ entity,
+ where_criteria,
+ include_aliases=include_aliases,
+ propagate_to_loaders=propagate_to_loaders,
+ )
+
+ def __reduce__(self):
+ return (
+ LoaderCriteriaOption._unreduce,
+ (
+ self.entity.class_ if self.entity else self.root_entity,
+ self._where_crit_orig,
+ self.include_aliases,
+ self.propagate_to_loaders,
+ ),
+ )
+
+ def _all_mappers(self):
+
+ if self.entity:
+ for ent in self.entity.mapper.self_and_descendants:
+ yield ent
+ else:
+ stack = list(self.root_entity.__subclasses__())
+ while stack:
+ subclass = stack.pop(0)
+ ent = inspection.inspect(subclass, raiseerr=False)
+ if ent:
+ for mp in ent.mapper.self_and_descendants:
+ yield mp
+ else:
+ stack.extend(subclass.__subclasses__())
+
+ def _should_include(self, compile_state):
+ if (
+ compile_state.select_statement._annotations.get(
+ "for_loader_criteria", None
+ )
+ is self
+ ):
+ return False
+ return True
+
+ def _resolve_where_criteria(self, ext_info):
+ if self.deferred_where_criteria:
+ crit = self.where_criteria._resolve_with_args(ext_info.entity)
+ else:
+ crit = self.where_criteria
+ return sql_util._deep_annotate(
+ crit, {"for_loader_criteria": self}, detect_subquery_cols=True
+ )
+
+ def process_compile_state_replaced_entities(
+ self, compile_state, mapper_entities
+ ):
+ return self.process_compile_state(compile_state)
+
+ def process_compile_state(self, compile_state):
+ """Apply a modification to a given :class:`.CompileState`."""
+
+ # if options to limit the criteria to immediate query only,
+ # use compile_state.attributes instead
+
+ if compile_state.compile_options._with_polymorphic_adapt_map:
+ util.warn(
+ "The with_loader_criteria() function may not work "
+ "correctly with the legacy Query.with_polymorphic() feature. "
+ "Please migrate code to use the with_polymorphic() standalone "
+ "function before using with_loader_criteria()."
+ )
+ self.get_global_criteria(compile_state.global_attributes)
+
+ def get_global_criteria(self, attributes):
+ for mp in self._all_mappers():
+ load_criteria = attributes.setdefault(
+ ("additional_entity_criteria", mp), []
+ )
+
+ load_criteria.append(self)
+
+
+inspection._inspects(AliasedClass)(lambda target: target._aliased_insp)
+inspection._inspects(AliasedInsp)(lambda target: target)
+
+
+def aliased(element, alias=None, name=None, flat=False, adapt_on_names=False):
+ """Produce an alias of the given element, usually an :class:`.AliasedClass`
+ instance.
+
+ E.g.::
+
+ my_alias = aliased(MyClass)
+
+ session.query(MyClass, my_alias).filter(MyClass.id > my_alias.id)
+
+ The :func:`.aliased` function is used to create an ad-hoc mapping of a
+ mapped class to a new selectable. By default, a selectable is generated
+ from the normally mapped selectable (typically a :class:`_schema.Table`
+ ) using the
+ :meth:`_expression.FromClause.alias` method. However, :func:`.aliased`
+ can also be
+ used to link the class to a new :func:`_expression.select` statement.
+ Also, the :func:`.with_polymorphic` function is a variant of
+ :func:`.aliased` that is intended to specify a so-called "polymorphic
+ selectable", that corresponds to the union of several joined-inheritance
+ subclasses at once.
+
+ For convenience, the :func:`.aliased` function also accepts plain
+ :class:`_expression.FromClause` constructs, such as a
+ :class:`_schema.Table` or
+ :func:`_expression.select` construct. In those cases, the
+ :meth:`_expression.FromClause.alias`
+ method is called on the object and the new
+ :class:`_expression.Alias` object returned. The returned
+ :class:`_expression.Alias` is not
+ ORM-mapped in this case.
+
+ .. seealso::
+
+ :ref:`tutorial_orm_entity_aliases` - in the :ref:`unified_tutorial`
+
+ :ref:`orm_queryguide_orm_aliases` - in the :ref:`queryguide_toplevel`
+
+ :param element: element to be aliased. Is normally a mapped class,
+ but for convenience can also be a :class:`_expression.FromClause`
+ element.
+
+ :param alias: Optional selectable unit to map the element to. This is
+ usually used to link the object to a subquery, and should be an aliased
+ select construct as one would produce from the
+ :meth:`_query.Query.subquery` method or
+ the :meth:`_expression.Select.subquery` or
+ :meth:`_expression.Select.alias` methods of the :func:`_expression.select`
+ construct.
+
+ :param name: optional string name to use for the alias, if not specified
+ by the ``alias`` parameter. The name, among other things, forms the
+ attribute name that will be accessible via tuples returned by a
+ :class:`_query.Query` object. Not supported when creating aliases
+ of :class:`_sql.Join` objects.
+
+ :param flat: Boolean, will be passed through to the
+ :meth:`_expression.FromClause.alias` call so that aliases of
+ :class:`_expression.Join` objects will alias the individual tables
+ inside the join, rather than creating a subquery. This is generally
+ supported by all modern databases with regards to right-nested joins
+ and generally produces more efficient queries.
+
+ :param adapt_on_names: if True, more liberal "matching" will be used when
+ mapping the mapped columns of the ORM entity to those of the
+ given selectable - a name-based match will be performed if the
+ given selectable doesn't otherwise have a column that corresponds
+ to one on the entity. The use case for this is when associating
+ an entity with some derived selectable such as one that uses
+ aggregate functions::
+
+ class UnitPrice(Base):
+ __tablename__ = 'unit_price'
+ ...
+ unit_id = Column(Integer)
+ price = Column(Numeric)
+
+ aggregated_unit_price = Session.query(
+ func.sum(UnitPrice.price).label('price')
+ ).group_by(UnitPrice.unit_id).subquery()
+
+ aggregated_unit_price = aliased(UnitPrice,
+ alias=aggregated_unit_price, adapt_on_names=True)
+
+ Above, functions on ``aggregated_unit_price`` which refer to
+ ``.price`` will return the
+ ``func.sum(UnitPrice.price).label('price')`` column, as it is
+ matched on the name "price". Ordinarily, the "price" function
+ wouldn't have any "column correspondence" to the actual
+ ``UnitPrice.price`` column as it is not a proxy of the original.
+
+ """
+ if isinstance(element, expression.FromClause):
+ if adapt_on_names:
+ raise sa_exc.ArgumentError(
+ "adapt_on_names only applies to ORM elements"
+ )
+ if name:
+ return element.alias(name=name, flat=flat)
+ else:
+ return coercions.expect(
+ roles.AnonymizedFromClauseRole, element, flat=flat
+ )
+ else:
+ return AliasedClass(
+ element,
+ alias=alias,
+ flat=flat,
+ name=name,
+ adapt_on_names=adapt_on_names,
+ )
+
+
+def with_polymorphic(
+ base,
+ classes,
+ selectable=False,
+ flat=False,
+ polymorphic_on=None,
+ aliased=False,
+ adapt_on_names=False,
+ innerjoin=False,
+ _use_mapper_path=False,
+ _existing_alias=None,
+):
+ """Produce an :class:`.AliasedClass` construct which specifies
+ columns for descendant mappers of the given base.
+
+ Using this method will ensure that each descendant mapper's
+ tables are included in the FROM clause, and will allow filter()
+ criterion to be used against those tables. The resulting
+ instances will also have those columns already loaded so that
+ no "post fetch" of those columns will be required.
+
+ .. seealso::
+
+ :ref:`with_polymorphic` - full discussion of
+ :func:`_orm.with_polymorphic`.
+
+ :param base: Base class to be aliased.
+
+ :param classes: a single class or mapper, or list of
+ class/mappers, which inherit from the base class.
+ Alternatively, it may also be the string ``'*'``, in which case
+ all descending mapped classes will be added to the FROM clause.
+
+ :param aliased: when True, the selectable will be aliased. For a
+ JOIN, this means the JOIN will be SELECTed from inside of a subquery
+ unless the :paramref:`_orm.with_polymorphic.flat` flag is set to
+ True, which is recommended for simpler use cases.
+
+ :param flat: Boolean, will be passed through to the
+ :meth:`_expression.FromClause.alias` call so that aliases of
+ :class:`_expression.Join` objects will alias the individual tables
+ inside the join, rather than creating a subquery. This is generally
+ supported by all modern databases with regards to right-nested joins
+ and generally produces more efficient queries. Setting this flag is
+ recommended as long as the resulting SQL is functional.
+
+ :param selectable: a table or subquery that will
+ be used in place of the generated FROM clause. This argument is
+ required if any of the desired classes use concrete table
+ inheritance, since SQLAlchemy currently cannot generate UNIONs
+ among tables automatically. If used, the ``selectable`` argument
+ must represent the full set of tables and columns mapped by every
+ mapped class. Otherwise, the unaccounted mapped columns will
+ result in their table being appended directly to the FROM clause
+ which will usually lead to incorrect results.
+
+ When left at its default value of ``False``, the polymorphic
+ selectable assigned to the base mapper is used for selecting rows.
+ However, it may also be passed as ``None``, which will bypass the
+ configured polymorphic selectable and instead construct an ad-hoc
+ selectable for the target classes given; for joined table inheritance
+ this will be a join that includes all target mappers and their
+ subclasses.
+
+ :param polymorphic_on: a column to be used as the "discriminator"
+ column for the given selectable. If not given, the polymorphic_on
+ attribute of the base classes' mapper will be used, if any. This
+ is useful for mappings that don't have polymorphic loading
+ behavior by default.
+
+    :param innerjoin: if True, an INNER JOIN will be used.  This should
+      only be specified when querying for one specific subtype.
+
+ :param adapt_on_names: Passes through the
+ :paramref:`_orm.aliased.adapt_on_names`
+ parameter to the aliased object. This may be useful in situations where
+ the given selectable is not directly related to the existing mapped
+ selectable.
+
+ .. versionadded:: 1.4.33
+
+ """
+ primary_mapper = _class_to_mapper(base)
+
+ if selectable not in (None, False) and flat:
+ raise sa_exc.ArgumentError(
+ "the 'flat' and 'selectable' arguments cannot be passed "
+ "simultaneously to with_polymorphic()"
+ )
+
+ if _existing_alias:
+ assert _existing_alias.mapper is primary_mapper
+ classes = util.to_set(classes)
+ new_classes = set(
+ [mp.class_ for mp in _existing_alias.with_polymorphic_mappers]
+ )
+ if classes == new_classes:
+ return _existing_alias
+ else:
+ classes = classes.union(new_classes)
+ mappers, selectable = primary_mapper._with_polymorphic_args(
+ classes, selectable, innerjoin=innerjoin
+ )
+ if aliased or flat:
+ selectable = selectable._anonymous_fromclause(flat=flat)
+ return AliasedClass(
+ base,
+ selectable,
+ adapt_on_names=adapt_on_names,
+ with_polymorphic_mappers=mappers,
+ with_polymorphic_discriminator=polymorphic_on,
+ use_mapper_path=_use_mapper_path,
+ represents_outer_join=not innerjoin,
+ )
+
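+# Illustrative sketch (Employee/Engineer/Manager are hypothetical classes in
+# a joined-inheritance hierarchy): load Employee rows with subclass columns
+# present up front, while filtering on a subclass attribute:
+#
+#     emp_poly = with_polymorphic(Employee, [Engineer, Manager])
+#     q = session.query(emp_poly).filter(
+#         emp_poly.Engineer.engineer_info == "dilbert"
+#     )
+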
+
+@inspection._self_inspects
+class Bundle(
+ ORMColumnsClauseRole,
+ SupportsCloneAnnotations,
+ sql_base.MemoizedHasCacheKey,
+ InspectionAttr,
+):
+ """A grouping of SQL expressions that are returned by a :class:`.Query`
+ under one namespace.
+
+ The :class:`.Bundle` essentially allows nesting of the tuple-based
+ results returned by a column-oriented :class:`_query.Query` object.
+ It also
+ is extensible via simple subclassing, where the primary capability
+ to override is that of how the set of expressions should be returned,
+ allowing post-processing as well as custom return types, without
+ involving ORM identity-mapped classes.
+
+ .. versionadded:: 0.9.0
+
+ .. seealso::
+
+ :ref:`bundles`
+
+
+ """
+
+ single_entity = False
+ """If True, queries for a single Bundle will be returned as a single
+ entity, rather than an element within a keyed tuple."""
+
+ is_clause_element = False
+
+ is_mapper = False
+
+ is_aliased_class = False
+
+ is_bundle = True
+
+ _propagate_attrs = util.immutabledict()
+
+ def __init__(self, name, *exprs, **kw):
+ r"""Construct a new :class:`.Bundle`.
+
+ e.g.::
+
+ bn = Bundle("mybundle", MyClass.x, MyClass.y)
+
+ for row in session.query(bn).filter(
+ bn.c.x == 5).filter(bn.c.y == 4):
+ print(row.mybundle.x, row.mybundle.y)
+
+ :param name: name of the bundle.
+ :param \*exprs: columns or SQL expressions comprising the bundle.
+ :param single_entity=False: if True, rows for this :class:`.Bundle`
+ can be returned as a "single entity" outside of any enclosing tuple
+ in the same manner as a mapped entity.
+
+ """
+ self.name = self._label = name
+ self.exprs = exprs = [
+ coercions.expect(
+ roles.ColumnsClauseRole, expr, apply_propagate_attrs=self
+ )
+ for expr in exprs
+ ]
+
+ self.c = self.columns = ColumnCollection(
+ (getattr(col, "key", col._label), col)
+ for col in [e._annotations.get("bundle", e) for e in exprs]
+ )
+ self.single_entity = kw.pop("single_entity", self.single_entity)
+
+ def _gen_cache_key(self, anon_map, bindparams):
+ return (self.__class__, self.name, self.single_entity) + tuple(
+ [expr._gen_cache_key(anon_map, bindparams) for expr in self.exprs]
+ )
+
+ @property
+ def mapper(self):
+ return self.exprs[0]._annotations.get("parentmapper", None)
+
+ @property
+ def entity(self):
+ return self.exprs[0]._annotations.get("parententity", None)
+
+ @property
+ def entity_namespace(self):
+ return self.c
+
+ columns = None
+ """A namespace of SQL expressions referred to by this :class:`.Bundle`.
+
+ e.g.::
+
+ bn = Bundle("mybundle", MyClass.x, MyClass.y)
+
+ q = sess.query(bn).filter(bn.c.x == 5)
+
+ Nesting of bundles is also supported::
+
+ b1 = Bundle("b1",
+ Bundle('b2', MyClass.a, MyClass.b),
+ Bundle('b3', MyClass.x, MyClass.y)
+ )
+
+ q = sess.query(b1).filter(
+ b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
+
+ .. seealso::
+
+ :attr:`.Bundle.c`
+
+ """
+
+ c = None
+ """An alias for :attr:`.Bundle.columns`."""
+
+ def _clone(self):
+ cloned = self.__class__.__new__(self.__class__)
+ cloned.__dict__.update(self.__dict__)
+ return cloned
+
+ def __clause_element__(self):
+ # ensure existing entity_namespace remains
+ annotations = {"bundle": self, "entity_namespace": self}
+ annotations.update(self._annotations)
+
+ plugin_subject = self.exprs[0]._propagate_attrs.get(
+ "plugin_subject", self.entity
+ )
+ return (
+ expression.ClauseList(
+ _literal_as_text_role=roles.ColumnsClauseRole,
+ group=False,
+ *[e._annotations.get("bundle", e) for e in self.exprs]
+ )
+ ._annotate(annotations)
+ ._set_propagate_attrs(
+ # the Bundle *must* use the orm plugin no matter what. the
+ # subject can be None but it's much better if it's not.
+ {
+ "compile_state_plugin": "orm",
+ "plugin_subject": plugin_subject,
+ }
+ )
+ )
+
+ @property
+ def clauses(self):
+ return self.__clause_element__().clauses
+
+ def label(self, name):
+        """Provide a copy of this :class:`.Bundle`, passing a new label."""
+
+ cloned = self._clone()
+ cloned.name = name
+ return cloned
+
+ def create_row_processor(self, query, procs, labels):
+ """Produce the "row processing" function for this :class:`.Bundle`.
+
+ May be overridden by subclasses.
+
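+        As a minimal sketch of such a subclass, returning plain
+        dictionaries rather than keyed tuples (``DictBundle`` is a
+        hypothetical name)::
+
+            class DictBundle(Bundle):
+                def create_row_processor(self, query, procs, labels):
+                    def proc(row):
+                        # zip each label with its processed column value
+                        return dict(
+                            zip(labels, (proc(row) for proc in procs))
+                        )
+                    return proc
+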
+ .. seealso::
+
+ :ref:`bundles` - includes an example of subclassing.
+
+ """
+        keyed_tuple = result_tuple(labels, [() for _ in labels])
+
+        def proc(row):
+            # ``proc`` in the comprehension is comprehension-local and
+            # does not clash with this enclosing function's name
+            return keyed_tuple([proc(row) for proc in procs])
+
+ return proc
+
+
+def _orm_annotate(element, exclude=None):
+ """Deep copy the given ClauseElement, annotating each element with the
+ "_orm_adapt" flag.
+
+ Elements within the exclude collection will be cloned but not annotated.
+
+ """
+ return sql_util._deep_annotate(element, {"_orm_adapt": True}, exclude)
+
+
+def _orm_deannotate(element):
+ """Remove annotations that link a column to a particular mapping.
+
+ Note this doesn't affect "remote" and "foreign" annotations
+ passed by the :func:`_orm.foreign` and :func:`_orm.remote`
+ annotators.
+
+ """
+
+ return sql_util._deep_deannotate(
+ element, values=("_orm_adapt", "parententity")
+ )
+
+
+def _orm_full_deannotate(element):
+ return sql_util._deep_deannotate(element)
+
+
+class _ORMJoin(expression.Join):
+ """Extend Join to support ORM constructs as input."""
+
+ __visit_name__ = expression.Join.__visit_name__
+
+ inherit_cache = True
+
+ def __init__(
+ self,
+ left,
+ right,
+ onclause=None,
+ isouter=False,
+ full=False,
+ _left_memo=None,
+ _right_memo=None,
+ _extra_criteria=(),
+ ):
+ left_info = inspection.inspect(left)
+
+ right_info = inspection.inspect(right)
+ adapt_to = right_info.selectable
+
+ # used by joined eager loader
+ self._left_memo = _left_memo
+ self._right_memo = _right_memo
+
+ # legacy, for string attr name ON clause. if that's removed
+ # then the "_joined_from_info" concept can go
+ left_orm_info = getattr(left, "_joined_from_info", left_info)
+ self._joined_from_info = right_info
+ if isinstance(onclause, util.string_types):
+ onclause = getattr(left_orm_info.entity, onclause)
+ # ####
+
+ if isinstance(onclause, attributes.QueryableAttribute):
+ on_selectable = onclause.comparator._source_selectable()
+ prop = onclause.property
+ _extra_criteria += onclause._extra_criteria
+ elif isinstance(onclause, MapperProperty):
+ # used internally by joined eager loader...possibly not ideal
+ prop = onclause
+ on_selectable = prop.parent.selectable
+ else:
+ prop = None
+
+ if prop:
+ left_selectable = left_info.selectable
+
+ if sql_util.clause_is_present(on_selectable, left_selectable):
+ adapt_from = on_selectable
+ else:
+ adapt_from = left_selectable
+
+ (
+ pj,
+ sj,
+ source,
+ dest,
+ secondary,
+ target_adapter,
+ ) = prop._create_joins(
+ source_selectable=adapt_from,
+ dest_selectable=adapt_to,
+ source_polymorphic=True,
+ of_type_entity=right_info,
+ alias_secondary=True,
+ extra_criteria=_extra_criteria,
+ )
+
+ if sj is not None:
+ if isouter:
+ # note this is an inner join from secondary->right
+ right = sql.join(secondary, right, sj)
+ onclause = pj
+ else:
+ left = sql.join(left, secondary, pj, isouter)
+ onclause = sj
+ else:
+ onclause = pj
+
+ self._target_adapter = target_adapter
+
+ augment_onclause = onclause is None and _extra_criteria
+ expression.Join.__init__(self, left, right, onclause, isouter, full)
+
+ if augment_onclause:
+ self.onclause &= sql.and_(*_extra_criteria)
+
+ if (
+ not prop
+ and getattr(right_info, "mapper", None)
+ and right_info.mapper.single
+ ):
+ # if single inheritance target and we are using a manual
+ # or implicit ON clause, augment it the same way we'd augment the
+ # WHERE.
+ single_crit = right_info.mapper._single_table_criterion
+ if single_crit is not None:
+ if right_info.is_aliased_class:
+ single_crit = right_info._adapter.traverse(single_crit)
+ self.onclause = self.onclause & single_crit
+
+ def _splice_into_center(self, other):
+ """Splice a join into the center.
+
+ Given join(a, b) and join(b, c), return join(a, b).join(c)
+
+ """
+ leftmost = other
+ while isinstance(leftmost, sql.Join):
+ leftmost = leftmost.left
+
+ assert self.right is leftmost
+
+ left = _ORMJoin(
+ self.left,
+ other.left,
+ self.onclause,
+ isouter=self.isouter,
+ _left_memo=self._left_memo,
+ _right_memo=other._left_memo,
+ )
+
+ return _ORMJoin(
+ left,
+ other.right,
+ other.onclause,
+ isouter=other.isouter,
+ _right_memo=other._right_memo,
+ )
+
+ def join(
+ self,
+ right,
+ onclause=None,
+ isouter=False,
+ full=False,
+ join_to_left=None,
+ ):
+ return _ORMJoin(self, right, onclause, full=full, isouter=isouter)
+
+ def outerjoin(self, right, onclause=None, full=False, join_to_left=None):
+ return _ORMJoin(self, right, onclause, isouter=True, full=full)
+
+
+def join(
+ left, right, onclause=None, isouter=False, full=False, join_to_left=None
+):
+ r"""Produce an inner join between left and right clauses.
+
+ :func:`_orm.join` is an extension to the core join interface
+ provided by :func:`_expression.join()`, where the
+ left and right selectables may be not only core selectable
+ objects such as :class:`_schema.Table`, but also mapped classes or
+ :class:`.AliasedClass` instances. The "on" clause can
+ be a SQL expression or an ORM mapped attribute
+ referencing a configured :func:`_orm.relationship`.
+
+ .. deprecated:: 1.4 using a string relationship name for the "onclause"
+       is deprecated and will be removed in 2.0; the onclause may only be
+       an ORM-mapped relationship attribute or a SQL expression construct.
+
+ :func:`_orm.join` is not commonly needed in modern usage,
+ as its functionality is encapsulated within that of the
+ :meth:`_sql.Select.join` and :meth:`_query.Query.join`
+    methods, which feature a
+ significant amount of automation beyond :func:`_orm.join`
+ by itself. Explicit use of :func:`_orm.join`
+ with ORM-enabled SELECT statements involves use of the
+ :meth:`_sql.Select.select_from` method, as in::
+
+ from sqlalchemy.orm import join
+ stmt = select(User).\
+ select_from(join(User, Address, User.addresses)).\
+ filter(Address.email_address=='foo@bar.com')
+
+ In modern SQLAlchemy the above join can be written more
+ succinctly as::
+
+ stmt = select(User).\
+ join(User.addresses).\
+ filter(Address.email_address=='foo@bar.com')
+
+ See :ref:`orm_queryguide_joins` for information on modern usage
+ of ORM level joins.
+
+ .. deprecated:: 0.8
+
+ the ``join_to_left`` parameter is deprecated, and will be removed
+ in a future release. The parameter has no effect.
+
+ """
+ return _ORMJoin(left, right, onclause, isouter, full)
+
+
+def outerjoin(left, right, onclause=None, full=False, join_to_left=None):
+ """Produce a left outer join between left and right clauses.
+
+ This is the "outer join" version of the :func:`_orm.join` function,
+ featuring the same behavior except that an OUTER JOIN is generated.
+ See that function's documentation for other usage details.
+
+ """
+ return _ORMJoin(left, right, onclause, True, full)
+
+
+def with_parent(instance, prop, from_entity=None):
+ """Create filtering criterion that relates this query's primary entity
+ to the given related instance, using established
+ :func:`_orm.relationship()`
+ configuration.
+
+ E.g.::
+
+ stmt = select(Address).where(with_parent(some_user, User.addresses))
+
+
+ The SQL rendered is the same as that rendered when a lazy loader
+ would fire off from the given parent on that attribute, meaning
+ that the appropriate state is taken from the parent object in
+ Python without the need to render joins to the parent table
+ in the rendered statement.
+
+ The given property may also make use of :meth:`_orm.PropComparator.of_type`
+ to indicate the left side of the criteria::
+
+
+ a1 = aliased(Address)
+ a2 = aliased(Address)
+ stmt = select(a1, a2).where(
+ with_parent(u1, User.addresses.of_type(a2))
+ )
+
+ The above use is equivalent to using the
+ :func:`_orm.with_parent.from_entity` argument::
+
+ a1 = aliased(Address)
+ a2 = aliased(Address)
+ stmt = select(a1, a2).where(
+ with_parent(u1, User.addresses, from_entity=a2)
+ )
+
+ :param instance:
+ An instance which has some :func:`_orm.relationship`.
+
+    :param prop:
+ String property name, or class-bound attribute, which indicates
+ what relationship from the instance should be used to reconcile the
+ parent/child relationship.
+
+ .. deprecated:: 1.4 Using strings is deprecated and will be removed
+ in SQLAlchemy 2.0. Please use the class-bound attribute directly.
+
+ :param from_entity:
+      Entity to consider as the left side. This defaults to the
+ "zero" entity of the :class:`_query.Query` itself.
+
+ .. versionadded:: 1.2
+
+ """
+ if isinstance(prop, util.string_types):
+ util.warn_deprecated_20(
+ "Using strings to indicate relationship names in the ORM "
+ "with_parent() function is deprecated and will be removed "
+            "in SQLAlchemy 2.0. Please use the class-bound attribute directly."
+ )
+ mapper = object_mapper(instance)
+ prop = getattr(mapper.class_, prop).property
+ elif isinstance(prop, attributes.QueryableAttribute):
+ if prop._of_type:
+ from_entity = prop._of_type
+ prop = prop.property
+
+ return prop._with_parent(instance, from_entity=from_entity)
+
+
+def has_identity(object_):
+ """Return True if the given object has a database
+ identity.
+
+ This typically corresponds to the object being
+ in either the persistent or detached state.
+
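+    A short sketch (``User`` is a hypothetical mapped class)::
+
+        obj = User(name="x")
+        has_identity(obj)   # False - transient; no database identity yet
+
+        session.add(obj)
+        session.flush()
+        has_identity(obj)   # True - persistent; identity has been assigned
+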
+ .. seealso::
+
+ :func:`.was_deleted`
+
+ """
+ state = attributes.instance_state(object_)
+ return state.has_identity
+
+
+def was_deleted(object_):
+ """Return True if the given object was deleted
+ within a session flush.
+
+    This is regardless of whether the object is
+    persistent or detached.
+
+ .. seealso::
+
+ :attr:`.InstanceState.was_deleted`
+
+ """
+
+ state = attributes.instance_state(object_)
+ return state.was_deleted
+
+
+def _entity_corresponds_to(given, entity):
+ """determine if 'given' corresponds to 'entity', in terms
+ of an entity passed to Query that would match the same entity
+ being referred to elsewhere in the query.
+
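+    e.g., a sketch following the notation used by
+    ``_entity_corresponds_to_use_path_impl`` below (``A`` is a
+    hypothetical mapped entity; ``fn`` stands for this function)::
+
+        fn(A, A)    # -> True
+
+        a1 = aliased(A)
+        fn(a1, a1)  # -> True  (same base alias)
+        fn(a1, A)   # -> False (a plain aliased() construct does not
+                    #    match the base entity)
+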
+ """
+ if entity.is_aliased_class:
+ if given.is_aliased_class:
+ if entity._base_alias() is given._base_alias():
+ return True
+ return False
+ elif given.is_aliased_class:
+ if given._use_mapper_path:
+ return entity in given.with_polymorphic_mappers
+ else:
+ return entity is given
+
+ return entity.common_parent(given)
+
+
+def _entity_corresponds_to_use_path_impl(given, entity):
+ """determine if 'given' corresponds to 'entity', in terms
+ of a path of loader options where a mapped attribute is taken to
+ be a member of a parent entity.
+
+ e.g.::
+
+ someoption(A).someoption(A.b) # -> fn(A, A) -> True
+ someoption(A).someoption(C.d) # -> fn(A, C) -> False
+
+ a1 = aliased(A)
+ someoption(a1).someoption(A.b) # -> fn(a1, A) -> False
+ someoption(a1).someoption(a1.b) # -> fn(a1, a1) -> True
+
+ wp = with_polymorphic(A, [A1, A2])
+ someoption(wp).someoption(A1.foo) # -> fn(wp, A1) -> False
+ someoption(wp).someoption(wp.A1.foo) # -> fn(wp, wp.A1) -> True
+
+
+ """
+ if given.is_aliased_class:
+ return (
+ entity.is_aliased_class
+ and not entity._use_mapper_path
+ and (given is entity or given in entity._with_polymorphic_entities)
+ )
+ elif not entity.is_aliased_class:
+ return given.common_parent(entity.mapper)
+ else:
+ return (
+ entity._use_mapper_path
+ and given in entity.with_polymorphic_mappers
+ )
+
+
+def _entity_isa(given, mapper):
+    """determine if 'given' "is a" mapper, in terms of whether the
+    given entity would load rows of type 'mapper'.
+
+ """
+ if given.is_aliased_class:
+ return mapper in given.with_polymorphic_mappers or given.mapper.isa(
+ mapper
+ )
+ elif given.with_polymorphic_mappers:
+ return mapper in given.with_polymorphic_mappers
+ else:
+ return given.isa(mapper)
+
+
+def randomize_unitofwork():
+ """Use random-ordering sets within the unit of work in order
+ to detect unit of work sorting issues.
+
+ This is a utility function that can be used to help reproduce
+ inconsistent unit of work sorting issues. For example,
+    if two kinds of objects A and B are being inserted, and
+    B has a foreign key reference to A, then A must be inserted first.
+ However, if there is no relationship between A and B, the unit of work
+ won't know to perform this sorting, and an operation may or may not
+ fail, depending on how the ordering works out. Since Python sets
+ and dictionaries have non-deterministic ordering, such an issue may
+ occur on some runs and not on others, and in practice it tends to
+ have a great dependence on the state of the interpreter. This leads
+    to so-called "heisenbugs" where changing entirely irrelevant aspects
+    of the test program still causes the failure behavior to change.
+
+ By calling ``randomize_unitofwork()`` when a script first runs, the
+    ordering of a key series of sets within the unit of work implementation
+    is randomized, so that the script can be minimized down to the
+ fundamental mapping and operation that's failing, while still reproducing
+ the issue on at least some runs.
+
+ This utility is also available when running the test suite via the
+ ``--reversetop`` flag.
+
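+    A sketch of typical use at the top of a reproduction script (the
+    import path assumes this module's location)::
+
+        from sqlalchemy.orm.util import randomize_unitofwork
+
+        randomize_unitofwork()
+
+        # ... mapping setup and the flush() that intermittently fails ...
+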
+ """
+ from sqlalchemy.orm import unitofwork, session, mapper, dependency
+ from sqlalchemy.util import topological
+ from sqlalchemy.testing.util import RandomSet
+
+ topological.set = (
+ unitofwork.set
+ ) = session.set = mapper.set = dependency.set = RandomSet
+
+
+def _getitem(iterable_query, item, allow_negative):
+ """calculate __getitem__ in terms of an iterable query object
+ that also has a slice() method.
+
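+    A sketch of the equivalences this implements::
+
+        query[5]       # -> list(query.slice(5, 6))[0]
+        query[2:7]     # -> list(query.slice(2, 7))
+        query[2:7:2]   # -> list(query.slice(2, 7))[::2]
+        query[-1]      # -> list(query)[-1], deprecated; raises
+                       #    IndexError when negative indexes are disallowed
+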
+ """
+
+ def _no_negative_indexes():
+ if not allow_negative:
+ raise IndexError(
+ "negative indexes are not accepted by SQL "
+ "index / slice operators"
+ )
+ else:
+ util.warn_deprecated_20(
+ "Support for negative indexes for SQL index / slice operators "
+ "will be "
+ "removed in 2.0; these operators fetch the complete result "
+ "and do not work efficiently."
+ )
+
+ if isinstance(item, slice):
+ start, stop, step = util.decode_slice(item)
+
+ if (
+ isinstance(stop, int)
+ and isinstance(start, int)
+ and stop - start <= 0
+ ):
+ return []
+
+ elif (isinstance(start, int) and start < 0) or (
+ isinstance(stop, int) and stop < 0
+ ):
+ _no_negative_indexes()
+ return list(iterable_query)[item]
+
+ res = iterable_query.slice(start, stop)
+ if step is not None:
+ return list(res)[None : None : item.step]
+ else:
+ return list(res)
+ else:
+ if item == -1:
+ _no_negative_indexes()
+ return list(iterable_query)[-1]
+ else:
+ return list(iterable_query[item : item + 1])[0]