author     xiubuzhe <xiubuzhe@sina.com>    2023-10-08 20:59:00 +0800
committer  xiubuzhe <xiubuzhe@sina.com>    2023-10-08 20:59:00 +0800
commit     1dac2263372df2b85db5d029a45721fa158a5c9d (patch)
tree       0365f9c57df04178a726d7584ca6a6b955a7ce6a /lib/sqlalchemy/pool
parent     b494be364bb39e1de128ada7dc576a729d99907e (diff)
first add files
Diffstat (limited to 'lib/sqlalchemy/pool')
-rw-r--r--  lib/sqlalchemy/pool/__init__.py      56
-rw-r--r--  lib/sqlalchemy/pool/base.py        1121
-rw-r--r--  lib/sqlalchemy/pool/dbapi_proxy.py  147
-rw-r--r--  lib/sqlalchemy/pool/events.py       284
-rw-r--r--  lib/sqlalchemy/pool/impl.py         514
5 files changed, 2122 insertions, 0 deletions
diff --git a/lib/sqlalchemy/pool/__init__.py b/lib/sqlalchemy/pool/__init__.py
new file mode 100644
index 0000000..6a00ef8
--- /dev/null
+++ b/lib/sqlalchemy/pool/__init__.py
@@ -0,0 +1,56 @@
+# sqlalchemy/pool/__init__.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+
+"""Connection pooling for DB-API connections.
+
+Provides a number of connection pool implementations for a variety of
+usage scenarios and thread behavior requirements imposed by the
+application, DB-API or database itself.
+
+Also provides a DB-API 2.0 connection proxying mechanism allowing
+regular DB-API connect() methods to be transparently managed by a
+SQLAlchemy connection pool.
+"""
+
+from . import events
+from .base import _ConnectionFairy
+from .base import _ConnectionRecord
+from .base import _finalize_fairy
+from .base import Pool
+from .base import reset_commit
+from .base import reset_none
+from .base import reset_rollback
+from .dbapi_proxy import clear_managers
+from .dbapi_proxy import manage
+from .impl import AssertionPool
+from .impl import AsyncAdaptedQueuePool
+from .impl import FallbackAsyncAdaptedQueuePool
+from .impl import NullPool
+from .impl import QueuePool
+from .impl import SingletonThreadPool
+from .impl import StaticPool
+
+
+__all__ = [
+ "Pool",
+ "reset_commit",
+ "reset_none",
+ "reset_rollback",
+ "clear_managers",
+ "manage",
+ "AssertionPool",
+ "NullPool",
+ "QueuePool",
+ "AsyncAdaptedQueuePool",
+ "FallbackAsyncAdaptedQueuePool",
+ "SingletonThreadPool",
+ "StaticPool",
+]
+
+# as these are likely to be used in various test suites and debugging
+# setups, keep them in the sqlalchemy.pool namespace
diff --git a/lib/sqlalchemy/pool/base.py b/lib/sqlalchemy/pool/base.py
new file mode 100644
index 0000000..cde28c2
--- /dev/null
+++ b/lib/sqlalchemy/pool/base.py
@@ -0,0 +1,1121 @@
+# sqlalchemy/pool/base.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+
+"""Base constructs for connection pools.
+
+"""
+
+from collections import deque
+import time
+import weakref
+
+from .. import event
+from .. import exc
+from .. import log
+from .. import util
+
+
+reset_rollback = util.symbol("reset_rollback")
+reset_commit = util.symbol("reset_commit")
+reset_none = util.symbol("reset_none")
+
+
+class _ConnDialect(object):
+ """partial implementation of :class:`.Dialect`
+ which provides DBAPI connection methods.
+
+ When a :class:`_pool.Pool` is combined with an :class:`_engine.Engine`,
+ the :class:`_engine.Engine` replaces this with its own
+ :class:`.Dialect`.
+
+ """
+
+ is_async = False
+
+ def do_rollback(self, dbapi_connection):
+ dbapi_connection.rollback()
+
+ def do_commit(self, dbapi_connection):
+ dbapi_connection.commit()
+
+ def do_close(self, dbapi_connection):
+ dbapi_connection.close()
+
+ def do_ping(self, dbapi_connection):
+ raise NotImplementedError(
+ "The ping feature requires that a dialect is "
+ "passed to the connection pool."
+ )
+
+ def get_driver_connection(self, connection):
+ return connection
+
+
+class _AsyncConnDialect(_ConnDialect):
+ is_async = True
+
+
+class Pool(log.Identified):
+
+ """Abstract base class for connection pools."""
+
+ _dialect = _ConnDialect()
+
+ def __init__(
+ self,
+ creator,
+ recycle=-1,
+ echo=None,
+ logging_name=None,
+ reset_on_return=True,
+ events=None,
+ dialect=None,
+ pre_ping=False,
+ _dispatch=None,
+ ):
+ """
+ Construct a Pool.
+
+        :param creator: a callable function that returns a DB-API
+          connection object.  The function may accept no arguments, or
+          a single positional argument which receives the
+          :class:`._ConnectionRecord` being provisioned.
+
+ :param recycle: If set to a value other than -1, number of
+ seconds between connection recycling, which means upon
+ checkout, if this timeout is surpassed the connection will be
+ closed and replaced with a newly opened connection. Defaults to -1.
+
+ :param logging_name: String identifier which will be used within
+ the "name" field of logging records generated within the
+ "sqlalchemy.pool" logger. Defaults to a hexstring of the object's
+ id.
+
+ :param echo: if True, the connection pool will log
+ informational output such as when connections are invalidated
+ as well as when connections are recycled to the default log handler,
+          which defaults to ``sys.stdout`` for output.  If set to the string
+ ``"debug"``, the logging will include pool checkouts and checkins.
+
+ The :paramref:`_pool.Pool.echo` parameter can also be set from the
+ :func:`_sa.create_engine` call by using the
+ :paramref:`_sa.create_engine.echo_pool` parameter.
+
+ .. seealso::
+
+ :ref:`dbengine_logging` - further detail on how to configure
+ logging.
+
+ :param reset_on_return: Determine steps to take on
+ connections as they are returned to the pool, which were
+ not otherwise handled by a :class:`_engine.Connection`.
+
+ reset_on_return can have any of these values:
+
+ * ``"rollback"`` - call rollback() on the connection,
+ to release locks and transaction resources.
+ This is the default value. The vast majority
+ of use cases should leave this value set.
+ * ``True`` - same as 'rollback', this is here for
+ backwards compatibility.
+ * ``"commit"`` - call commit() on the connection,
+ to release locks and transaction resources.
+ A commit here may be desirable for databases that
+ cache query plans if a commit is emitted,
+ such as Microsoft SQL Server. However, this
+ value is more dangerous than 'rollback' because
+ any data changes present on the transaction
+ are committed unconditionally.
+ * ``None`` - don't do anything on the connection.
+ This setting is only appropriate if the database / DBAPI
+ works in pure "autocommit" mode at all times, or if the
+ application uses the :class:`_engine.Engine` with consistent
+ connectivity patterns. See the section
+ :ref:`pool_reset_on_return` for more details.
+
+ * ``False`` - same as None, this is here for
+ backwards compatibility.
+
+ .. seealso::
+
+ :ref:`pool_reset_on_return`
+
+ :param events: a list of 2-tuples, each of the form
+ ``(callable, target)`` which will be passed to :func:`.event.listen`
+ upon construction. Provided here so that event listeners
+ can be assigned via :func:`_sa.create_engine` before dialect-level
+ listeners are applied.
+
+ :param dialect: a :class:`.Dialect` that will handle the job
+ of calling rollback(), close(), or commit() on DBAPI connections.
+ If omitted, a built-in "stub" dialect is used. Applications that
+ make use of :func:`_sa.create_engine` should not use this parameter
+ as it is handled by the engine creation strategy.
+
+ .. versionadded:: 1.1 - ``dialect`` is now a public parameter
+ to the :class:`_pool.Pool`.
+
+ :param pre_ping: if True, the pool will emit a "ping" (typically
+ "SELECT 1", but is dialect-specific) on the connection
+ upon checkout, to test if the connection is alive or not. If not,
+ the connection is transparently re-connected and upon success, all
+ other pooled connections established prior to that timestamp are
+ invalidated. Requires that a dialect is passed as well to
+ interpret the disconnection error.
+
+ .. versionadded:: 1.2
+
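+        A minimal construction sketch, using the stdlib ``sqlite3`` module
+        as an illustrative stand-in for any DB-API module::
+
+            import sqlite3
+
+            from sqlalchemy.pool import QueuePool
+
+            def get_conn():
+                # creator: called by the pool whenever a new
+                # DBAPI connection is needed
+                return sqlite3.connect(":memory:")
+
+            pool = QueuePool(
+                get_conn, pool_size=5, reset_on_return="rollback"
+            )
+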
+ """
+ if logging_name:
+ self.logging_name = self._orig_logging_name = logging_name
+ else:
+ self._orig_logging_name = None
+
+ log.instance_logger(self, echoflag=echo)
+ self._creator = creator
+ self._recycle = recycle
+ self._invalidate_time = 0
+ self._pre_ping = pre_ping
+ self._reset_on_return = util.symbol.parse_user_argument(
+ reset_on_return,
+ {
+ reset_rollback: ["rollback", True],
+ reset_none: ["none", None, False],
+ reset_commit: ["commit"],
+ },
+ "reset_on_return",
+ resolve_symbol_names=False,
+ )
+
+ self.echo = echo
+
+ if _dispatch:
+ self.dispatch._update(_dispatch, only_propagate=False)
+ if dialect:
+ self._dialect = dialect
+ if events:
+ for fn, target in events:
+ event.listen(self, target, fn)
+
+ @util.hybridproperty
+ def _is_asyncio(self):
+ return self._dialect.is_async
+
+ @property
+ def _creator(self):
+ return self.__dict__["_creator"]
+
+ @_creator.setter
+ def _creator(self, creator):
+ self.__dict__["_creator"] = creator
+ self._invoke_creator = self._should_wrap_creator(creator)
+
+ def _should_wrap_creator(self, creator):
+ """Detect if creator accepts a single argument, or is sent
+ as a legacy style no-arg function.
+
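+        E.g., both creator forms below would be accepted (an illustrative
+        sketch using the stdlib ``sqlite3`` module)::
+
+            import sqlite3
+
+            def legacy_creator():
+                # legacy no-arg form; will be wrapped by the pool
+                return sqlite3.connect(":memory:")
+
+            def creator(connection_record):
+                # single-positional form; receives the _ConnectionRecord
+                return sqlite3.connect(":memory:")
+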
+ """
+
+ try:
+ argspec = util.get_callable_argspec(self._creator, no_self=True)
+ except TypeError:
+ return lambda crec: creator()
+
+ defaulted = argspec[3] is not None and len(argspec[3]) or 0
+ positionals = len(argspec[0]) - defaulted
+
+ # look for the exact arg signature that DefaultStrategy
+ # sends us
+ if (argspec[0], argspec[3]) == (["connection_record"], (None,)):
+ return creator
+ # or just a single positional
+ elif positionals == 1:
+ return creator
+ # all other cases, just wrap and assume legacy "creator" callable
+ # thing
+ else:
+ return lambda crec: creator()
+
+ def _close_connection(self, connection):
+ self.logger.debug("Closing connection %r", connection)
+
+ try:
+ self._dialect.do_close(connection)
+ except Exception:
+ self.logger.error(
+ "Exception closing connection %r", connection, exc_info=True
+ )
+
+ def _create_connection(self):
+ """Called by subclasses to create a new ConnectionRecord."""
+
+ return _ConnectionRecord(self)
+
+ def _invalidate(self, connection, exception=None, _checkin=True):
+ """Mark all connections established within the generation
+ of the given connection as invalidated.
+
+        If this pool's last invalidate time is before when the given
+        connection was created, update the timestamp to the current time.
+        Otherwise, no action is performed.
+
+ Connections with a start time prior to this pool's invalidation
+ time will be recycled upon next checkout.
+ """
+ rec = getattr(connection, "_connection_record", None)
+ if not rec or self._invalidate_time < rec.starttime:
+ self._invalidate_time = time.time()
+ if _checkin and getattr(connection, "is_valid", False):
+ connection.invalidate(exception)
+
+ def recreate(self):
+ """Return a new :class:`_pool.Pool`, of the same class as this one
+ and configured with identical creation arguments.
+
+ This method is used in conjunction with :meth:`dispose`
+ to close out an entire :class:`_pool.Pool` and create a new one in
+ its place.
+
+ """
+
+ raise NotImplementedError()
+
+ def dispose(self):
+ """Dispose of this pool.
+
+ This method leaves the possibility of checked-out connections
+ remaining open, as it only affects connections that are
+ idle in the pool.
+
+ .. seealso::
+
+ :meth:`Pool.recreate`
+
+ """
+
+ raise NotImplementedError()
+
+ def connect(self):
+ """Return a DBAPI connection from the pool.
+
+ The connection is instrumented such that when its
+ ``close()`` method is called, the connection will be returned to
+ the pool.
+
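+        E.g., an illustrative round trip, assuming a ``pool`` constructed
+        as in the examples above::
+
+            conn = pool.connect()     # check out a connection proxy
+            cursor = conn.cursor()
+            cursor.execute("SELECT 1")
+            cursor.close()
+            conn.close()              # returns it to the pool
+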
+ """
+ return _ConnectionFairy._checkout(self)
+
+ def _return_conn(self, record):
+ """Given a _ConnectionRecord, return it to the :class:`_pool.Pool`.
+
+ This method is called when an instrumented DBAPI connection
+ has its ``close()`` method called.
+
+ """
+ self._do_return_conn(record)
+
+ def _do_get(self):
+ """Implementation for :meth:`get`, supplied by subclasses."""
+
+ raise NotImplementedError()
+
+ def _do_return_conn(self, conn):
+ """Implementation for :meth:`return_conn`, supplied by subclasses."""
+
+ raise NotImplementedError()
+
+ def status(self):
+ raise NotImplementedError()
+
+
+class _ConnectionRecord(object):
+
+ """Internal object which maintains an individual DBAPI connection
+ referenced by a :class:`_pool.Pool`.
+
+ The :class:`._ConnectionRecord` object always exists for any particular
+ DBAPI connection whether or not that DBAPI connection has been
+ "checked out". This is in contrast to the :class:`._ConnectionFairy`
+ which is only a public facade to the DBAPI connection while it is checked
+ out.
+
+ A :class:`._ConnectionRecord` may exist for a span longer than that
+ of a single DBAPI connection. For example, if the
+ :meth:`._ConnectionRecord.invalidate`
+ method is called, the DBAPI connection associated with this
+ :class:`._ConnectionRecord`
+ will be discarded, but the :class:`._ConnectionRecord` may be used again,
+ in which case a new DBAPI connection is produced when the
+ :class:`_pool.Pool`
+ next uses this record.
+
+ The :class:`._ConnectionRecord` is delivered along with connection
+ pool events, including :meth:`_events.PoolEvents.connect` and
+ :meth:`_events.PoolEvents.checkout`, however :class:`._ConnectionRecord`
+ still
+ remains an internal object whose API and internals may change.
+
+ .. seealso::
+
+ :class:`._ConnectionFairy`
+
+ """
+
+ def __init__(self, pool, connect=True):
+ self.__pool = pool
+ if connect:
+ self.__connect()
+ self.finalize_callback = deque()
+
+ fresh = False
+
+ fairy_ref = None
+
+ starttime = None
+
+ dbapi_connection = None
+ """A reference to the actual DBAPI connection being tracked.
+
+ May be ``None`` if this :class:`._ConnectionRecord` has been marked
+ as invalidated; a new DBAPI connection may replace it if the owning
+ pool calls upon this :class:`._ConnectionRecord` to reconnect.
+
+ For adapted drivers, like the Asyncio implementations, this is a
+ :class:`.AdaptedConnection` that adapts the driver connection
+ to the DBAPI protocol.
+    Use :attr:`._ConnectionRecord.driver_connection` to obtain the
+    connection object returned by the driver.
+
+ .. versionadded:: 1.4.24
+
+ """
+
+ @property
+ def driver_connection(self):
+ """The connection object as returned by the driver after a connect.
+
+ For normal sync drivers that support the DBAPI protocol, this object
+ is the same as the one referenced by
+ :attr:`._ConnectionRecord.dbapi_connection`.
+
+ For adapted drivers, like the Asyncio ones, this is the actual object
+ that was returned by the driver ``connect`` call.
+
+        As with :attr:`._ConnectionRecord.dbapi_connection`, it may be
+        ``None`` if this :class:`._ConnectionRecord` has been marked as
+        invalidated.
+
+ .. versionadded:: 1.4.24
+
+ """
+
+ if self.dbapi_connection is None:
+ return None
+ else:
+ return self.__pool._dialect.get_driver_connection(
+ self.dbapi_connection
+ )
+
+ @property
+ def connection(self):
+ """An alias to :attr:`._ConnectionRecord.dbapi_connection`.
+
+ This alias is deprecated, please use the new name.
+
+ .. deprecated:: 1.4.24
+
+ """
+ return self.dbapi_connection
+
+ @connection.setter
+ def connection(self, value):
+ self.dbapi_connection = value
+
+ _soft_invalidate_time = 0
+
+ @util.memoized_property
+ def info(self):
+ """The ``.info`` dictionary associated with the DBAPI connection.
+
+ This dictionary is shared among the :attr:`._ConnectionFairy.info`
+ and :attr:`_engine.Connection.info` accessors.
+
+ .. note::
+
+ The lifespan of this dictionary is linked to the
+ DBAPI connection itself, meaning that it is **discarded** each time
+ the DBAPI connection is closed and/or invalidated. The
+ :attr:`._ConnectionRecord.record_info` dictionary remains
+ persistent throughout the lifespan of the
+ :class:`._ConnectionRecord` container.
+
+ """
+ return {}
+
+ @util.memoized_property
+ def record_info(self):
+        """An "info" dictionary associated with the connection record
+ itself.
+
+ Unlike the :attr:`._ConnectionRecord.info` dictionary, which is linked
+ to the lifespan of the DBAPI connection, this dictionary is linked
+ to the lifespan of the :class:`._ConnectionRecord` container itself
+ and will remain persistent throughout the life of the
+ :class:`._ConnectionRecord`.
+
+ .. versionadded:: 1.1
+
+ """
+ return {}
+
+ @classmethod
+ def checkout(cls, pool):
+ rec = pool._do_get()
+ try:
+ dbapi_connection = rec.get_connection()
+ except Exception as err:
+ with util.safe_reraise():
+ rec._checkin_failed(err, _fairy_was_created=False)
+ echo = pool._should_log_debug()
+ fairy = _ConnectionFairy(dbapi_connection, rec, echo)
+
+ rec.fairy_ref = ref = weakref.ref(
+ fairy,
+ lambda ref: _finalize_fairy
+ and _finalize_fairy(None, rec, pool, ref, echo, True),
+ )
+ _strong_ref_connection_records[ref] = rec
+ if echo:
+ pool.logger.debug(
+ "Connection %r checked out from pool", dbapi_connection
+ )
+ return fairy
+
+ def _checkin_failed(self, err, _fairy_was_created=True):
+ self.invalidate(e=err)
+ self.checkin(
+ _fairy_was_created=_fairy_was_created,
+ )
+
+ def checkin(self, _fairy_was_created=True):
+ if self.fairy_ref is None and _fairy_was_created:
+ # _fairy_was_created is False for the initial get connection phase;
+ # meaning there was no _ConnectionFairy and we must unconditionally
+ # do a checkin.
+ #
+ # otherwise, if fairy_was_created==True, if fairy_ref is None here
+ # that means we were checked in already, so this looks like
+ # a double checkin.
+ util.warn("Double checkin attempted on %s" % self)
+ return
+ self.fairy_ref = None
+ connection = self.dbapi_connection
+ pool = self.__pool
+ while self.finalize_callback:
+ finalizer = self.finalize_callback.pop()
+ finalizer(connection)
+ if pool.dispatch.checkin:
+ pool.dispatch.checkin(connection, self)
+
+ pool._return_conn(self)
+
+ @property
+ def in_use(self):
+ return self.fairy_ref is not None
+
+ @property
+ def last_connect_time(self):
+ return self.starttime
+
+ def close(self):
+ if self.dbapi_connection is not None:
+ self.__close()
+
+ def invalidate(self, e=None, soft=False):
+ """Invalidate the DBAPI connection held by this
+ :class:`._ConnectionRecord`.
+
+ This method is called for all connection invalidations, including
+ when the :meth:`._ConnectionFairy.invalidate` or
+ :meth:`_engine.Connection.invalidate` methods are called,
+ as well as when any
+ so-called "automatic invalidation" condition occurs.
+
+ :param e: an exception object indicating a reason for the
+ invalidation.
+
+ :param soft: if True, the connection isn't closed; instead, this
+ connection will be recycled on next checkout.
+
+ .. versionadded:: 1.0.3
+
+ .. seealso::
+
+ :ref:`pool_connection_invalidation`
+
+ """
+ # already invalidated
+ if self.dbapi_connection is None:
+ return
+ if soft:
+ self.__pool.dispatch.soft_invalidate(
+ self.dbapi_connection, self, e
+ )
+ else:
+ self.__pool.dispatch.invalidate(self.dbapi_connection, self, e)
+ if e is not None:
+ self.__pool.logger.info(
+ "%sInvalidate connection %r (reason: %s:%s)",
+ "Soft " if soft else "",
+ self.dbapi_connection,
+ e.__class__.__name__,
+ e,
+ )
+ else:
+ self.__pool.logger.info(
+ "%sInvalidate connection %r",
+ "Soft " if soft else "",
+ self.dbapi_connection,
+ )
+
+ if soft:
+ self._soft_invalidate_time = time.time()
+ else:
+ self.__close()
+ self.dbapi_connection = None
+
+ def get_connection(self):
+ recycle = False
+
+ # NOTE: the various comparisons here are assuming that measurable time
+ # passes between these state changes. however, time.time() is not
+ # guaranteed to have sub-second precision. comparisons of
+ # "invalidation time" to "starttime" should perhaps use >= so that the
+ # state change can take place assuming no measurable time has passed,
+ # however this does not guarantee correct behavior here as if time
+ # continues to not pass, it will try to reconnect repeatedly until
+ # these timestamps diverge, so in that sense using > is safer. Per
+ # https://stackoverflow.com/a/1938096/34549, Windows time.time() may be
+ # within 16 milliseconds accuracy, so unit tests for connection
+ # invalidation need a sleep of at least this long between initial start
+ # time and invalidation for the logic below to work reliably.
+ if self.dbapi_connection is None:
+ self.info.clear()
+ self.__connect()
+ elif (
+ self.__pool._recycle > -1
+ and time.time() - self.starttime > self.__pool._recycle
+ ):
+ self.__pool.logger.info(
+ "Connection %r exceeded timeout; recycling",
+ self.dbapi_connection,
+ )
+ recycle = True
+ elif self.__pool._invalidate_time > self.starttime:
+ self.__pool.logger.info(
+ "Connection %r invalidated due to pool invalidation; "
+ + "recycling",
+ self.dbapi_connection,
+ )
+ recycle = True
+ elif self._soft_invalidate_time > self.starttime:
+ self.__pool.logger.info(
+ "Connection %r invalidated due to local soft invalidation; "
+ + "recycling",
+ self.dbapi_connection,
+ )
+ recycle = True
+
+ if recycle:
+ self.__close()
+ self.info.clear()
+
+ self.__connect()
+ return self.dbapi_connection
+
+ def _is_hard_or_soft_invalidated(self):
+ return (
+ self.dbapi_connection is None
+ or self.__pool._invalidate_time > self.starttime
+ or (self._soft_invalidate_time > self.starttime)
+ )
+
+ def __close(self):
+ self.finalize_callback.clear()
+ if self.__pool.dispatch.close:
+ self.__pool.dispatch.close(self.dbapi_connection, self)
+ self.__pool._close_connection(self.dbapi_connection)
+ self.dbapi_connection = None
+
+ def __connect(self):
+ pool = self.__pool
+
+ # ensure any existing connection is removed, so that if
+ # creator fails, this attribute stays None
+ self.dbapi_connection = None
+ try:
+ self.starttime = time.time()
+ self.dbapi_connection = connection = pool._invoke_creator(self)
+ pool.logger.debug("Created new connection %r", connection)
+ self.fresh = True
+ except Exception as e:
+ with util.safe_reraise():
+ pool.logger.debug("Error on connect(): %s", e)
+ else:
+ # in SQLAlchemy 1.4 the first_connect event is not used by
+ # the engine, so this will usually not be set
+ if pool.dispatch.first_connect:
+ pool.dispatch.first_connect.for_modify(
+ pool.dispatch
+ ).exec_once_unless_exception(self.dbapi_connection, self)
+
+ # init of the dialect now takes place within the connect
+ # event, so ensure a mutex is used on the first run
+ pool.dispatch.connect.for_modify(
+ pool.dispatch
+ )._exec_w_sync_on_first_run(self.dbapi_connection, self)
+
+
+def _finalize_fairy(
+ dbapi_connection,
+ connection_record,
+ pool,
+ ref, # this is None when called directly, not by the gc
+ echo,
+ reset=True,
+ fairy=None,
+):
+ """Cleanup for a :class:`._ConnectionFairy` whether or not it's already
+ been garbage collected.
+
+ When using an async dialect no IO can happen here (without using
+ a dedicated thread), since this is called outside the greenlet
+    context and with an already running loop. In this case the function
+    will only log a message and emit a warning.
+ """
+
+ if ref:
+ _strong_ref_connection_records.pop(ref, None)
+ elif fairy:
+ _strong_ref_connection_records.pop(weakref.ref(fairy), None)
+
+ if ref is not None:
+ if connection_record.fairy_ref is not ref:
+ return
+ assert dbapi_connection is None
+ dbapi_connection = connection_record.dbapi_connection
+
+    # null pool is not _is_asyncio but can also be used with async dialects
+ dont_restore_gced = pool._dialect.is_async
+
+ if dont_restore_gced:
+ detach = not connection_record or ref
+ can_manipulate_connection = not ref
+ else:
+ detach = not connection_record
+ can_manipulate_connection = True
+
+ if dbapi_connection is not None:
+ if connection_record and echo:
+ pool.logger.debug(
+ "Connection %r being returned to pool%s",
+ dbapi_connection,
+ ", transaction state was already reset by caller"
+ if not reset
+ else "",
+ )
+
+ try:
+ fairy = fairy or _ConnectionFairy(
+ dbapi_connection,
+ connection_record,
+ echo,
+ )
+ assert fairy.dbapi_connection is dbapi_connection
+ if reset and can_manipulate_connection:
+ fairy._reset(pool)
+
+ if detach:
+ if connection_record:
+ fairy._pool = pool
+ fairy.detach()
+
+ if can_manipulate_connection:
+ if pool.dispatch.close_detached:
+ pool.dispatch.close_detached(dbapi_connection)
+
+ pool._close_connection(dbapi_connection)
+ else:
+ message = (
+ "The garbage collector is trying to clean up "
+ "connection %r. This feature is unsupported on async "
+ "dbapi, since no IO can be performed at this stage to "
+ "reset the connection. Please close out all "
+ "connections when they are no longer used, calling "
+ "``close()`` or using a context manager to "
+ "manage their lifetime."
+ ) % dbapi_connection
+ pool.logger.error(message)
+ util.warn(message)
+
+ except BaseException as e:
+ pool.logger.error(
+ "Exception during reset or similar", exc_info=True
+ )
+ if connection_record:
+ connection_record.invalidate(e=e)
+ if not isinstance(e, Exception):
+ raise
+
+ if connection_record and connection_record.fairy_ref is not None:
+ connection_record.checkin()
+
+
+# a dictionary of the _ConnectionFairy weakrefs to _ConnectionRecord, so that
+# GC under pypy will call ConnectionFairy finalizers. linked directly to the
+# weakref that will empty itself when collected so that it should not create
+# any unmanaged memory references.
+_strong_ref_connection_records = {}
+
+
+class _ConnectionFairy(object):
+
+ """Proxies a DBAPI connection and provides return-on-dereference
+ support.
+
+ This is an internal object used by the :class:`_pool.Pool` implementation
+ to provide context management to a DBAPI connection delivered by
+ that :class:`_pool.Pool`.
+
+ The name "fairy" is inspired by the fact that the
+ :class:`._ConnectionFairy` object's lifespan is transitory, as it lasts
+ only for the length of a specific DBAPI connection being checked out from
+ the pool, and additionally that as a transparent proxy, it is mostly
+ invisible.
+
+ .. seealso::
+
+ :class:`._ConnectionRecord`
+
+ """
+
+ def __init__(self, dbapi_connection, connection_record, echo):
+ self.dbapi_connection = dbapi_connection
+ self._connection_record = connection_record
+ self._echo = echo
+
+ dbapi_connection = None
+ """A reference to the actual DBAPI connection being tracked.
+
+ .. versionadded:: 1.4.24
+
+ .. seealso::
+
+ :attr:`._ConnectionFairy.driver_connection`
+
+ :attr:`._ConnectionRecord.dbapi_connection`
+
+ :ref:`faq_dbapi_connection`
+
+ """
+
+ _connection_record = None
+ """A reference to the :class:`._ConnectionRecord` object associated
+ with the DBAPI connection.
+
+ This is currently an internal accessor which is subject to change.
+
+ """
+
+ @property
+ def driver_connection(self):
+ """The connection object as returned by the driver after a connect.
+
+ .. versionadded:: 1.4.24
+
+ .. seealso::
+
+ :attr:`._ConnectionFairy.dbapi_connection`
+
+ :attr:`._ConnectionRecord.driver_connection`
+
+ :ref:`faq_dbapi_connection`
+
+ """
+ return self._connection_record.driver_connection
+
+ @property
+ def connection(self):
+ """An alias to :attr:`._ConnectionFairy.dbapi_connection`.
+
+ This alias is deprecated, please use the new name.
+
+ .. deprecated:: 1.4.24
+
+ """
+ return self.dbapi_connection
+
+ @connection.setter
+ def connection(self, value):
+ self.dbapi_connection = value
+
+ @classmethod
+ def _checkout(cls, pool, threadconns=None, fairy=None):
+ if not fairy:
+ fairy = _ConnectionRecord.checkout(pool)
+
+ fairy._pool = pool
+ fairy._counter = 0
+
+ if threadconns is not None:
+ threadconns.current = weakref.ref(fairy)
+
+ if fairy.dbapi_connection is None:
+ raise exc.InvalidRequestError("This connection is closed")
+ fairy._counter += 1
+ if (
+ not pool.dispatch.checkout and not pool._pre_ping
+ ) or fairy._counter != 1:
+ return fairy
+
+ # Pool listeners can trigger a reconnection on checkout, as well
+ # as the pre-pinger.
+        # up to two attempts are made here, but note that if the database
+        # is not accessible from a connection standpoint, those won't proceed
+        # here.
+ attempts = 2
+ while attempts > 0:
+ connection_is_fresh = fairy._connection_record.fresh
+ fairy._connection_record.fresh = False
+ try:
+ if pool._pre_ping:
+ if not connection_is_fresh:
+ if fairy._echo:
+ pool.logger.debug(
+ "Pool pre-ping on connection %s",
+ fairy.dbapi_connection,
+ )
+ result = pool._dialect.do_ping(fairy.dbapi_connection)
+ if not result:
+ if fairy._echo:
+ pool.logger.debug(
+ "Pool pre-ping on connection %s failed, "
+ "will invalidate pool",
+ fairy.dbapi_connection,
+ )
+ raise exc.InvalidatePoolError()
+ elif fairy._echo:
+ pool.logger.debug(
+ "Connection %s is fresh, skipping pre-ping",
+ fairy.dbapi_connection,
+ )
+
+ pool.dispatch.checkout(
+ fairy.dbapi_connection, fairy._connection_record, fairy
+ )
+ return fairy
+ except exc.DisconnectionError as e:
+ if e.invalidate_pool:
+ pool.logger.info(
+ "Disconnection detected on checkout, "
+ "invalidating all pooled connections prior to "
+ "current timestamp (reason: %r)",
+ e,
+ )
+ fairy._connection_record.invalidate(e)
+ pool._invalidate(fairy, e, _checkin=False)
+ else:
+ pool.logger.info(
+ "Disconnection detected on checkout, "
+ "invalidating individual connection %s (reason: %r)",
+ fairy.dbapi_connection,
+ e,
+ )
+ fairy._connection_record.invalidate(e)
+ try:
+ fairy.dbapi_connection = (
+ fairy._connection_record.get_connection()
+ )
+ except Exception as err:
+ with util.safe_reraise():
+ fairy._connection_record._checkin_failed(
+ err,
+ _fairy_was_created=True,
+ )
+
+ # prevent _ConnectionFairy from being carried
+ # in the stack trace. Do this after the
+ # connection record has been checked in, so that
+ # if the del triggers a finalize fairy, it won't
+ # try to checkin a second time.
+ del fairy
+
+ attempts -= 1
+
+ pool.logger.info("Reconnection attempts exhausted on checkout")
+ fairy.invalidate()
+ raise exc.InvalidRequestError("This connection is closed")
+
+ def _checkout_existing(self):
+ return _ConnectionFairy._checkout(self._pool, fairy=self)
+
+ def _checkin(self, reset=True):
+ _finalize_fairy(
+ self.dbapi_connection,
+ self._connection_record,
+ self._pool,
+ None,
+ self._echo,
+ reset=reset,
+ fairy=self,
+ )
+ self.dbapi_connection = None
+ self._connection_record = None
+
+ _close = _checkin
+
+ def _reset(self, pool):
+ if pool.dispatch.reset:
+ pool.dispatch.reset(self, self._connection_record)
+ if pool._reset_on_return is reset_rollback:
+ if self._echo:
+ pool.logger.debug(
+ "Connection %s rollback-on-return", self.dbapi_connection
+ )
+ pool._dialect.do_rollback(self)
+ elif pool._reset_on_return is reset_commit:
+ if self._echo:
+ pool.logger.debug(
+ "Connection %s commit-on-return",
+ self.dbapi_connection,
+ )
+ pool._dialect.do_commit(self)
+
+ @property
+ def _logger(self):
+ return self._pool.logger
+
+ @property
+ def is_valid(self):
+ """Return True if this :class:`._ConnectionFairy` still refers
+ to an active DBAPI connection."""
+
+ return self.dbapi_connection is not None
+
+ @util.memoized_property
+ def info(self):
+ """Info dictionary associated with the underlying DBAPI connection
+        referred to by this :class:`._ConnectionFairy`, allowing user-defined
+ data to be associated with the connection.
+
+ The data here will follow along with the DBAPI connection including
+ after it is returned to the connection pool and used again
+ in subsequent instances of :class:`._ConnectionFairy`. It is shared
+ with the :attr:`._ConnectionRecord.info` and
+ :attr:`_engine.Connection.info`
+ accessors.
+
+ The dictionary associated with a particular DBAPI connection is
+ discarded when the connection itself is discarded.
+
+ """
+ return self._connection_record.info
+
+ @property
+ def record_info(self):
+        """Info dictionary associated with the
+        :class:`._ConnectionRecord` container referred to by this
+        :class:`._ConnectionFairy`.
+
+ Unlike the :attr:`._ConnectionFairy.info` dictionary, the lifespan
+ of this dictionary is persistent across connections that are
+ disconnected and/or invalidated within the lifespan of a
+ :class:`._ConnectionRecord`.
+
+ .. versionadded:: 1.1
+
+ """
+ if self._connection_record:
+ return self._connection_record.record_info
+ else:
+ return None
+
+ def invalidate(self, e=None, soft=False):
+ """Mark this connection as invalidated.
+
+ This method can be called directly, and is also called as a result
+ of the :meth:`_engine.Connection.invalidate` method. When invoked,
+ the DBAPI connection is immediately closed and discarded from
+ further use by the pool. The invalidation mechanism proceeds
+ via the :meth:`._ConnectionRecord.invalidate` internal method.
+
+ :param e: an exception object indicating a reason for the invalidation.
+
+ :param soft: if True, the connection isn't closed; instead, this
+ connection will be recycled on next checkout.
+
+ .. versionadded:: 1.0.3
+
+ .. seealso::
+
+ :ref:`pool_connection_invalidation`
+
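+        E.g., an illustrative application-level invalidation, assuming a
+        checked-out ``conn`` proxy::
+
+            try:
+                cursor = conn.cursor()
+                cursor.execute("SELECT 1")
+            except Exception as err:
+                # discard the underlying DBAPI connection; the pool
+                # will open a new one on next checkout
+                conn.invalidate(err)
+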
+ """
+
+ if self.dbapi_connection is None:
+ util.warn("Can't invalidate an already-closed connection.")
+ return
+ if self._connection_record:
+ self._connection_record.invalidate(e=e, soft=soft)
+ if not soft:
+ self.dbapi_connection = None
+ self._checkin()
+
+ def cursor(self, *args, **kwargs):
+ """Return a new DBAPI cursor for the underlying connection.
+
+ This method is a proxy for the ``connection.cursor()`` DBAPI
+ method.
+
+ """
+ return self.dbapi_connection.cursor(*args, **kwargs)
+
+ def __getattr__(self, key):
+ return getattr(self.dbapi_connection, key)
+
+ def detach(self):
+ """Separate this connection from its Pool.
+
+ This means that the connection will no longer be returned to the
+ pool when closed, and will instead be literally closed. The
+ containing ConnectionRecord is separated from the DB-API connection,
+ and will create a new connection when next used.
+
+ Note that any overall connection limiting constraints imposed by a
+ Pool implementation may be violated after a detach, as the detached
+ connection is removed from the pool's knowledge and control.
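+
+        E.g., a sketch of detaching a pooled connection for exclusive use,
+        assuming a ``pool`` instance::
+
+            conn = pool.connect()
+            conn.detach()    # the pool forgets this connection
+            # ... long-running, exclusive use of conn ...
+            conn.close()     # now actually closes the DBAPI connection
+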
+ """
+
+ if self._connection_record is not None:
+ rec = self._connection_record
+ rec.fairy_ref = None
+ rec.dbapi_connection = None
+ # TODO: should this be _return_conn?
+ self._pool._do_return_conn(self._connection_record)
+ self.info = self.info.copy()
+ self._connection_record = None
+
+ if self._pool.dispatch.detach:
+ self._pool.dispatch.detach(self.dbapi_connection, rec)
+
+ def close(self):
+ self._counter -= 1
+ if self._counter == 0:
+ self._checkin()
+
+ def _close_no_reset(self):
+ self._counter -= 1
+ if self._counter == 0:
+ self._checkin(reset=False)
diff --git a/lib/sqlalchemy/pool/dbapi_proxy.py b/lib/sqlalchemy/pool/dbapi_proxy.py
new file mode 100644
index 0000000..b0c40f2
--- /dev/null
+++ b/lib/sqlalchemy/pool/dbapi_proxy.py
@@ -0,0 +1,147 @@
+# sqlalchemy/pool/dbapi_proxy.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+
+"""DBAPI proxy utility.
+
+Provides transparent connection pooling on top of a Python DBAPI.
+
+This is legacy SQLAlchemy functionality that is not typically used
+today.
+
+"""
+
+from .impl import QueuePool
+from .. import util
+from ..util import threading
+
+proxies = {}
+
+
+@util.deprecated(
+ "1.3",
+ "The :func:`.pool.manage` function is deprecated, and will be "
+ "removed in a future release.",
+)
+def manage(module, **params):
+ r"""Return a proxy for a DB-API module that automatically
+ pools connections.
+
+ Given a DB-API 2.0 module and pool management parameters, returns
+ a proxy for the module that will automatically pool connections,
+ creating new connection pools for each distinct set of connection
+ arguments sent to the decorated module's connect() function.
+
+ :param module: a DB-API 2.0 database module
+
+ :param poolclass: the class used by the pool module to provide
+ pooling. Defaults to :class:`.QueuePool`.
+
+ :param \**params: will be passed through to *poolclass*
+
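+    E.g., an illustrative use of this legacy proxy, with the stdlib
+    ``sqlite3`` module standing in for any DB-API module::
+
+        import sqlite3
+
+        from sqlalchemy import pool
+
+        proxied = pool.manage(sqlite3, pool_size=5)
+        conn = proxied.connect(":memory:")
+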
+ """
+ try:
+ return proxies[module]
+ except KeyError:
+ return proxies.setdefault(module, _DBProxy(module, **params))
+
+
+def clear_managers():
+ """Remove all current DB-API 2.0 managers.
+
+ All pools and connections are disposed.
+ """
+
+ for manager in proxies.values():
+ manager.close()
+ proxies.clear()
+
+
+class _DBProxy(object):
+
+ """Layers connection pooling behavior on top of a standard DB-API module.
+
+ Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
+ specific connect parameters. Other functions and attributes are delegated
+ to the underlying DB-API module.
+ """
+
+ def __init__(self, module, poolclass=QueuePool, **kw):
+ """Initializes a new proxy.
+
+ module
+ a DB-API 2.0 module
+
+ poolclass
+ a Pool class, defaulting to QueuePool
+
+ Other parameters are sent to the Pool object's constructor.
+
+ """
+
+ self.module = module
+ self.kw = kw
+ self.poolclass = poolclass
+ self.pools = {}
+ self._create_pool_mutex = threading.Lock()
+
+ def close(self):
+ for key in list(self.pools):
+ del self.pools[key]
+
+ def __del__(self):
+ self.close()
+
+ def __getattr__(self, key):
+ return getattr(self.module, key)
+
+ def get_pool(self, *args, **kw):
+ key = self._serialize(*args, **kw)
+ try:
+ return self.pools[key]
+ except KeyError:
+ with self._create_pool_mutex:
+ if key not in self.pools:
+ kw.pop("sa_pool_key", None)
+ pool = self.poolclass(
+ lambda: self.module.connect(*args, **kw), **self.kw
+ )
+ self.pools[key] = pool
+ return pool
+ else:
+ return self.pools[key]
+
+ def connect(self, *args, **kw):
+ """Activate a connection to the database.
+
+ Connect to the database using this DBProxy's module and the given
+ connect arguments. If the arguments match an existing pool, the
+ connection will be returned from the pool's current thread-local
+ connection instance, or if there is no thread-local connection
+ instance it will be checked out from the set of pooled connections.
+
+ If the pool has no available connections and allows new connections
+ to be created, a new database connection will be made.
+
+ """
+
+ return self.get_pool(*args, **kw).connect()
+
+ def dispose(self, *args, **kw):
+ """Dispose the pool referenced by the given connect arguments."""
+
+ key = self._serialize(*args, **kw)
+ try:
+ del self.pools[key]
+ except KeyError:
+ pass
+
+ def _serialize(self, *args, **kw):
+ if "sa_pool_key" in kw:
+ return kw["sa_pool_key"]
+
+ return tuple(list(args) + [(k, kw[k]) for k in sorted(kw)])
diff --git a/lib/sqlalchemy/pool/events.py b/lib/sqlalchemy/pool/events.py
new file mode 100644
index 0000000..2829a58
--- /dev/null
+++ b/lib/sqlalchemy/pool/events.py
@@ -0,0 +1,284 @@
+# sqlalchemy/pool/events.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+from .base import Pool
+from .. import event
+from ..engine.base import Engine
+
+
+class PoolEvents(event.Events):
+ """Available events for :class:`_pool.Pool`.
+
+ The methods here define the name of an event as well
+ as the names of members that are passed to listener
+ functions.
+
+ e.g.::
+
+ from sqlalchemy import event
+
+ def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
+ "handle an on checkout event"
+
+ event.listen(Pool, 'checkout', my_on_checkout)
+
+ In addition to accepting the :class:`_pool.Pool` class and
+ :class:`_pool.Pool` instances, :class:`_events.PoolEvents` also accepts
+ :class:`_engine.Engine` objects and the :class:`_engine.Engine` class as
+ targets, which will be resolved to the ``.pool`` attribute of the
+ given engine or the :class:`_pool.Pool` class::
+
+ engine = create_engine("postgresql://scott:tiger@localhost/test")
+
+ # will associate with engine.pool
+ event.listen(engine, 'checkout', my_on_checkout)
+
+ """
+
+ _target_class_doc = "SomeEngineOrPool"
+ _dispatch_target = Pool
+
+ @classmethod
+ def _accept_with(cls, target):
+ if isinstance(target, type):
+ if issubclass(target, Engine):
+ return Pool
+ elif issubclass(target, Pool):
+ return target
+ elif isinstance(target, Engine):
+ return target.pool
+ elif isinstance(target, Pool):
+ return target
+ elif hasattr(target, "dispatch") and hasattr(
+ target.dispatch._events, "_no_async_engine_events"
+ ):
+ target.dispatch._events._no_async_engine_events()
+ else:
+ return None
+
+ @classmethod
+ def _listen(cls, event_key, **kw):
+ target = event_key.dispatch_target
+
+ kw.setdefault("asyncio", target._is_asyncio)
+
+ event_key.base_listen(**kw)
+
+ def connect(self, dbapi_connection, connection_record):
+ """Called at the moment a particular DBAPI connection is first
+ created for a given :class:`_pool.Pool`.
+
+ This event allows one to capture the point directly after which
+ the DBAPI module-level ``.connect()`` method has been used in order
+ to produce a new DBAPI connection.
+
+ :param dbapi_connection: a DBAPI connection.
+ The :attr:`._ConnectionRecord.dbapi_connection` attribute.
+
+ :param connection_record: the :class:`._ConnectionRecord` managing the
+ DBAPI connection.
+
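+        E.g., a sketch that configures each newly created connection (the
+        PRAGMA shown assumes a SQLite DBAPI)::
+
+            from sqlalchemy import event
+            from sqlalchemy.pool import Pool
+
+            @event.listens_for(Pool, "connect")
+            def _on_connect(dbapi_connection, connection_record):
+                cursor = dbapi_connection.cursor()
+                cursor.execute("PRAGMA foreign_keys=ON")
+                cursor.close()
+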
+ """
+
+ def first_connect(self, dbapi_connection, connection_record):
+ """Called exactly once for the first time a DBAPI connection is
+ checked out from a particular :class:`_pool.Pool`.
+
+ The rationale for :meth:`_events.PoolEvents.first_connect`
+ is to determine
+ information about a particular series of database connections based
+ on the settings used for all connections. Since a particular
+ :class:`_pool.Pool`
+ refers to a single "creator" function (which in terms
+ of a :class:`_engine.Engine`
+ refers to the URL and connection options used),
+ it is typically valid to make observations about a single connection
+ that can be safely assumed to be valid about all subsequent
+ connections, such as the database version, the server and client
+ encoding settings, collation settings, and many others.
+
+ :param dbapi_connection: a DBAPI connection.
+ The :attr:`._ConnectionRecord.dbapi_connection` attribute.
+
+ :param connection_record: the :class:`._ConnectionRecord` managing the
+ DBAPI connection.
+
+ """
+
+ def checkout(self, dbapi_connection, connection_record, connection_proxy):
+ """Called when a connection is retrieved from the Pool.
+
+ :param dbapi_connection: a DBAPI connection.
+ The :attr:`._ConnectionRecord.dbapi_connection` attribute.
+
+ :param connection_record: the :class:`._ConnectionRecord` managing the
+ DBAPI connection.
+
+ :param connection_proxy: the :class:`._ConnectionFairy` object which
+ will proxy the public interface of the DBAPI connection for the
+ lifespan of the checkout.
+
+ If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current
+ connection will be disposed and a fresh connection retrieved.
+ Processing of all checkout listeners will abort and restart
+ using the new connection.
+
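+        E.g., a sketch of "pessimistic" disconnect handling using this
+        event::
+
+            from sqlalchemy import event, exc
+            from sqlalchemy.pool import Pool
+
+            @event.listens_for(Pool, "checkout")
+            def _ping(dbapi_connection, connection_record,
+                      connection_proxy):
+                cursor = dbapi_connection.cursor()
+                try:
+                    cursor.execute("SELECT 1")
+                except Exception:
+                    # dispose this connection and retry the checkout
+                    raise exc.DisconnectionError()
+                finally:
+                    cursor.close()
+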
+ .. seealso:: :meth:`_events.ConnectionEvents.engine_connect`
+ - a similar event
+ which occurs upon creation of a new :class:`_engine.Connection`.
+
+ """
+
+ def checkin(self, dbapi_connection, connection_record):
+ """Called when a connection returns to the pool.
+
+ Note that the connection may be closed, and may be None if the
+ connection has been invalidated. ``checkin`` will not be called
+ for detached connections. (They do not return to the pool.)
+
+ :param dbapi_connection: a DBAPI connection.
+ The :attr:`._ConnectionRecord.dbapi_connection` attribute.
+
+ :param connection_record: the :class:`._ConnectionRecord` managing the
+ DBAPI connection.
+
+ """
+
+ def reset(self, dbapi_connection, connection_record):
+ """Called before the "reset" action occurs for a pooled connection.
+
+ This event represents
+ when the ``rollback()`` method is called on the DBAPI connection
+ before it is returned to the pool. The behavior of "reset" can
+ be controlled, including disabled, using the ``reset_on_return``
+ pool argument.
+
+        The :meth:`_events.PoolEvents.reset` event is usually followed by
+        the :meth:`_events.PoolEvents.checkin` event, except in those
+        cases where the connection is discarded immediately after reset.
+
+ :param dbapi_connection: a DBAPI connection.
+ The :attr:`._ConnectionRecord.dbapi_connection` attribute.
+
+ :param connection_record: the :class:`._ConnectionRecord` managing the
+ DBAPI connection.
+
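+        E.g., an illustrative listener that simply logs each reset::
+
+            import logging
+
+            from sqlalchemy import event
+            from sqlalchemy.pool import Pool
+
+            log = logging.getLogger(__name__)
+
+            @event.listens_for(Pool, "reset")
+            def _on_reset(dbapi_connection, connection_record):
+                log.debug("reset on %r", dbapi_connection)
+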
+ .. seealso::
+
+ :meth:`_events.ConnectionEvents.rollback`
+
+ :meth:`_events.ConnectionEvents.commit`
+
+ """
+
+ def invalidate(self, dbapi_connection, connection_record, exception):
+ """Called when a DBAPI connection is to be "invalidated".
+
+ This event is called any time the :meth:`._ConnectionRecord.invalidate`
+ method is invoked, either from API usage or via "auto-invalidation",
+ without the ``soft`` flag.
+
+ The event occurs before a final attempt to call ``.close()`` on the
+ connection occurs.
+
+ :param dbapi_connection: a DBAPI connection.
+ The :attr:`._ConnectionRecord.dbapi_connection` attribute.
+
+ :param connection_record: the :class:`._ConnectionRecord` managing the
+ DBAPI connection.
+
+ :param exception: the exception object corresponding to the reason
+ for this invalidation, if any. May be ``None``.
+
+ .. versionadded:: 0.9.2 Added support for connection invalidation
+ listening.
+
+ .. seealso::
+
+ :ref:`pool_connection_invalidation`
+
+ """
+
+ def soft_invalidate(self, dbapi_connection, connection_record, exception):
+ """Called when a DBAPI connection is to be "soft invalidated".
+
+ This event is called any time the :meth:`._ConnectionRecord.invalidate`
+ method is invoked with the ``soft`` flag.
+
+ Soft invalidation refers to when the connection record that tracks
+ this connection will force a reconnect after the current connection
+ is checked in. It does not actively close the dbapi_connection
+ at the point at which it is called.
+
+ .. versionadded:: 1.0.3
+
+ :param dbapi_connection: a DBAPI connection.
+ The :attr:`._ConnectionRecord.dbapi_connection` attribute.
+
+ :param connection_record: the :class:`._ConnectionRecord` managing the
+ DBAPI connection.
+
+ :param exception: the exception object corresponding to the reason
+ for this invalidation, if any. May be ``None``.
+
+ """
+
+ def close(self, dbapi_connection, connection_record):
+ """Called when a DBAPI connection is closed.
+
+ The event is emitted before the close occurs.
+
+ The close of a connection can fail; typically this is because
+ the connection is already closed. If the close operation fails,
+ the connection is discarded.
+
+ The :meth:`.close` event corresponds to a connection that's still
+ associated with the pool. To intercept close events for detached
+ connections use :meth:`.close_detached`.
+
+ .. versionadded:: 1.1
+
+ :param dbapi_connection: a DBAPI connection.
+ The :attr:`._ConnectionRecord.dbapi_connection` attribute.
+
+ :param connection_record: the :class:`._ConnectionRecord` managing the
+ DBAPI connection.
+
+ """
+
+ def detach(self, dbapi_connection, connection_record):
+ """Called when a DBAPI connection is "detached" from a pool.
+
+ This event is emitted after the detach occurs. The connection
+ is no longer associated with the given connection record.
+
+ .. versionadded:: 1.1
+
+ :param dbapi_connection: a DBAPI connection.
+ The :attr:`._ConnectionRecord.dbapi_connection` attribute.
+
+ :param connection_record: the :class:`._ConnectionRecord` managing the
+ DBAPI connection.
+
+ """
+
+ def close_detached(self, dbapi_connection):
+ """Called when a detached DBAPI connection is closed.
+
+ The event is emitted before the close occurs.
+
+ The close of a connection can fail; typically this is because
+ the connection is already closed. If the close operation fails,
+ the connection is discarded.
+
+ .. versionadded:: 1.1
+
+ :param dbapi_connection: a DBAPI connection.
+ The :attr:`._ConnectionRecord.dbapi_connection` attribute.
+
+ """
diff --git a/lib/sqlalchemy/pool/impl.py b/lib/sqlalchemy/pool/impl.py
new file mode 100644
index 0000000..91d0290
--- /dev/null
+++ b/lib/sqlalchemy/pool/impl.py
@@ -0,0 +1,514 @@
+# sqlalchemy/pool/impl.py
+# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+
+
+"""Pool implementation classes.
+
+"""
+
+import traceback
+import weakref
+
+from .base import _AsyncConnDialect
+from .base import _ConnectionFairy
+from .base import _ConnectionRecord
+from .base import Pool
+from .. import exc
+from .. import util
+from ..util import chop_traceback
+from ..util import queue as sqla_queue
+from ..util import threading
+
+
+class QueuePool(Pool):
+
+ """A :class:`_pool.Pool`
+ that imposes a limit on the number of open connections.
+
+ :class:`.QueuePool` is the default pooling implementation used for
+ all :class:`_engine.Engine` objects, unless the SQLite dialect is in use.
+
+ """
+
+ _is_asyncio = False
+ _queue_class = sqla_queue.Queue
+
+ def __init__(
+ self,
+ creator,
+ pool_size=5,
+ max_overflow=10,
+ timeout=30.0,
+ use_lifo=False,
+ **kw
+ ):
+ r"""
+ Construct a QueuePool.
+
+ :param creator: a callable function that returns a DB-API
+ connection object, same as that of :paramref:`_pool.Pool.creator`.
+
+ :param pool_size: The size of the pool to be maintained,
+ defaults to 5. This is the largest number of connections that
+ will be kept persistently in the pool. Note that the pool
+ begins with no connections; once this number of connections
+ is requested, that number of connections will remain.
+ ``pool_size`` can be set to 0 to indicate no size limit; to
+ disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
+ instead.
+
+ :param max_overflow: The maximum overflow size of the
+ pool. When the number of checked-out connections reaches the
+ size set in pool_size, additional connections will be
+ returned up to this limit. When those additional connections
+ are returned to the pool, they are disconnected and
+ discarded. It follows then that the total number of
+ simultaneous connections the pool will allow is pool_size +
+ `max_overflow`, and the total number of "sleeping"
+ connections the pool will allow is pool_size. `max_overflow`
+ can be set to -1 to indicate no overflow limit; no limit
+ will be placed on the total number of concurrent
+ connections. Defaults to 10.
+
+ :param timeout: The number of seconds to wait before giving up
+ on returning a connection. Defaults to 30.0. This can be a float
+ but is subject to the limitations of Python time functions which
+ may not be reliable in the tens of milliseconds.
+
+ :param use_lifo: use LIFO (last-in-first-out) when retrieving
+ connections instead of FIFO (first-in-first-out). Using LIFO, a
+ server-side timeout scheme can reduce the number of connections used
+ during non-peak periods of use. When planning for server-side
+ timeouts, ensure that a recycle or pre-ping strategy is in use to
+ gracefully handle stale connections.
+
+ .. versionadded:: 1.3
+
+ .. seealso::
+
+ :ref:`pool_use_lifo`
+
+ :ref:`pool_disconnects`
+
+ :param \**kw: Other keyword arguments including
+ :paramref:`_pool.Pool.recycle`, :paramref:`_pool.Pool.echo`,
+ :paramref:`_pool.Pool.reset_on_return` and others are passed to the
+ :class:`_pool.Pool` constructor.
+
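+        A brief sizing sketch, with the stdlib ``sqlite3`` module standing
+        in for a DB-API module::
+
+            import sqlite3
+
+            from sqlalchemy.pool import QueuePool
+
+            pool = QueuePool(
+                lambda: sqlite3.connect(":memory:"),
+                pool_size=5,       # up to 5 persistent connections
+                max_overflow=10,   # up to 15 total concurrent connections
+                timeout=30.0,      # wait up to 30s for a checkout
+            )
+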
+ """
+ Pool.__init__(self, creator, **kw)
+ self._pool = self._queue_class(pool_size, use_lifo=use_lifo)
+ self._overflow = 0 - pool_size
+ self._max_overflow = max_overflow
+ self._timeout = timeout
+ self._overflow_lock = threading.Lock()
+
+ def _do_return_conn(self, conn):
+ try:
+ self._pool.put(conn, False)
+ except sqla_queue.Full:
+ try:
+ conn.close()
+ finally:
+ self._dec_overflow()
+
+ def _do_get(self):
+ use_overflow = self._max_overflow > -1
+
+ try:
+ wait = use_overflow and self._overflow >= self._max_overflow
+ return self._pool.get(wait, self._timeout)
+ except sqla_queue.Empty:
+ # don't do things inside of "except Empty", because when we say
+ # we timed out or can't connect and raise, Python 3 tells
+ # people the real error is queue.Empty which it isn't.
+ pass
+ if use_overflow and self._overflow >= self._max_overflow:
+ if not wait:
+ return self._do_get()
+ else:
+ raise exc.TimeoutError(
+ "QueuePool limit of size %d overflow %d reached, "
+ "connection timed out, timeout %0.2f"
+ % (self.size(), self.overflow(), self._timeout),
+ code="3o7r",
+ )
+
+ if self._inc_overflow():
+ try:
+ return self._create_connection()
+ except:
+ with util.safe_reraise():
+ self._dec_overflow()
+ else:
+ return self._do_get()
+
+ def _inc_overflow(self):
+ if self._max_overflow == -1:
+ self._overflow += 1
+ return True
+ with self._overflow_lock:
+ if self._overflow < self._max_overflow:
+ self._overflow += 1
+ return True
+ else:
+ return False
+
+ def _dec_overflow(self):
+ if self._max_overflow == -1:
+ self._overflow -= 1
+ return True
+ with self._overflow_lock:
+ self._overflow -= 1
+ return True
+
+ def recreate(self):
+ self.logger.info("Pool recreating")
+ return self.__class__(
+ self._creator,
+ pool_size=self._pool.maxsize,
+ max_overflow=self._max_overflow,
+ pre_ping=self._pre_ping,
+ use_lifo=self._pool.use_lifo,
+ timeout=self._timeout,
+ recycle=self._recycle,
+ echo=self.echo,
+ logging_name=self._orig_logging_name,
+ reset_on_return=self._reset_on_return,
+ _dispatch=self.dispatch,
+ dialect=self._dialect,
+ )
+
+ def dispose(self):
+ while True:
+ try:
+ conn = self._pool.get(False)
+ conn.close()
+ except sqla_queue.Empty:
+ break
+
+ self._overflow = 0 - self.size()
+ self.logger.info("Pool disposed. %s", self.status())
+
+ def status(self):
+ return (
+ "Pool size: %d Connections in pool: %d "
+ "Current Overflow: %d Current Checked out "
+ "connections: %d"
+ % (
+ self.size(),
+ self.checkedin(),
+ self.overflow(),
+ self.checkedout(),
+ )
+ )
+
+ def size(self):
+ return self._pool.maxsize
+
+ def timeout(self):
+ return self._timeout
+
+ def checkedin(self):
+ return self._pool.qsize()
+
+ def overflow(self):
+ return self._overflow
+
+ def checkedout(self):
+ return self._pool.maxsize - self._pool.qsize() + self._overflow
+
+
+class AsyncAdaptedQueuePool(QueuePool):
+ _is_asyncio = True
+ _queue_class = sqla_queue.AsyncAdaptedQueue
+ _dialect = _AsyncConnDialect()
+
+
+class FallbackAsyncAdaptedQueuePool(AsyncAdaptedQueuePool):
+ _queue_class = sqla_queue.FallbackAsyncAdaptedQueue
+
+
+class NullPool(Pool):
+
+ """A Pool which does not pool connections.
+
+    Instead it literally opens and closes the underlying DB-API connection
+    for each connection open/close.
+
+ Reconnect-related functions such as ``recycle`` and connection
+ invalidation are not supported by this Pool implementation, since
+ no connections are held persistently.
+
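+    E.g., an illustrative engine configuration (the URL shown is a
+    placeholder)::
+
+        from sqlalchemy import create_engine
+        from sqlalchemy.pool import NullPool
+
+        engine = create_engine("sqlite:///some.db", poolclass=NullPool)
+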
+ """
+
+ def status(self):
+ return "NullPool"
+
+ def _do_return_conn(self, conn):
+ conn.close()
+
+ def _do_get(self):
+ return self._create_connection()
+
+ def recreate(self):
+ self.logger.info("Pool recreating")
+
+ return self.__class__(
+ self._creator,
+ recycle=self._recycle,
+ echo=self.echo,
+ logging_name=self._orig_logging_name,
+ reset_on_return=self._reset_on_return,
+ pre_ping=self._pre_ping,
+ _dispatch=self.dispatch,
+ dialect=self._dialect,
+ )
+
+ def dispose(self):
+ pass
+
+
+class SingletonThreadPool(Pool):
+
+ """A Pool that maintains one connection per thread.
+
+    Maintains one connection for each thread, never moving a connection to
+    a thread other than the one in which it was created.
+
+ .. warning:: the :class:`.SingletonThreadPool` will call ``.close()``
+ on arbitrary connections that exist beyond the size setting of
+ ``pool_size``, e.g. if more unique **thread identities**
+ than what ``pool_size`` states are used. This cleanup is
+ non-deterministic and not sensitive to whether or not the connections
+ linked to those thread identities are currently in use.
+
+ :class:`.SingletonThreadPool` may be improved in a future release,
+ however in its current status it is generally used only for test
+ scenarios using a SQLite ``:memory:`` database and is not recommended
+ for production use.
+
+
+ Options are the same as those of :class:`_pool.Pool`, as well as:
+
+ :param pool_size: The number of threads in which to maintain connections
+ at once. Defaults to five.
+
+ :class:`.SingletonThreadPool` is used by the SQLite dialect
+ automatically when a memory-based database is used.
+ See :ref:`sqlite_toplevel`.
+
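+    E.g., an explicit test-scenario configuration::
+
+        from sqlalchemy import create_engine
+        from sqlalchemy.pool import SingletonThreadPool
+
+        engine = create_engine(
+            "sqlite://", poolclass=SingletonThreadPool, pool_size=5
+        )
+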
+ """
+
+ _is_asyncio = False
+
+ def __init__(self, creator, pool_size=5, **kw):
+ Pool.__init__(self, creator, **kw)
+ self._conn = threading.local()
+ self._fairy = threading.local()
+ self._all_conns = set()
+ self.size = pool_size
+
+ def recreate(self):
+ self.logger.info("Pool recreating")
+ return self.__class__(
+ self._creator,
+ pool_size=self.size,
+ recycle=self._recycle,
+ echo=self.echo,
+ pre_ping=self._pre_ping,
+ logging_name=self._orig_logging_name,
+ reset_on_return=self._reset_on_return,
+ _dispatch=self.dispatch,
+ dialect=self._dialect,
+ )
+
+ def dispose(self):
+ """Dispose of this pool."""
+
+ for conn in self._all_conns:
+ try:
+ conn.close()
+ except Exception:
+ # pysqlite won't even let you close a conn from a thread
+ # that didn't create it
+ pass
+
+ self._all_conns.clear()
+
+ def _cleanup(self):
+ while len(self._all_conns) >= self.size:
+ c = self._all_conns.pop()
+ c.close()
+
+ def status(self):
+ return "SingletonThreadPool id:%d size: %d" % (
+ id(self),
+ len(self._all_conns),
+ )
+
+ def _do_return_conn(self, conn):
+ pass
+
+ def _do_get(self):
+ try:
+ c = self._conn.current()
+ if c:
+ return c
+ except AttributeError:
+ pass
+ c = self._create_connection()
+ self._conn.current = weakref.ref(c)
+ if len(self._all_conns) >= self.size:
+ self._cleanup()
+ self._all_conns.add(c)
+ return c
+
+ def connect(self):
+ # vendored from Pool to include the now removed use_threadlocal
+ # behavior
+ try:
+ rec = self._fairy.current()
+ except AttributeError:
+ pass
+ else:
+ if rec is not None:
+ return rec._checkout_existing()
+
+ return _ConnectionFairy._checkout(self, self._fairy)
+
+ def _return_conn(self, record):
+ try:
+ del self._fairy.current
+ except AttributeError:
+ pass
+ self._do_return_conn(record)
+
+
+class StaticPool(Pool):
+
+ """A Pool of exactly one connection, used for all requests.
+
+ Reconnect-related functions such as ``recycle`` and connection
+ invalidation (which is also used to support auto-reconnect) are only
+ partially supported right now and may not yield good results.
+
+
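+    A common illustrative use is a SQLite ``:memory:`` database shared
+    across an application::
+
+        from sqlalchemy import create_engine
+        from sqlalchemy.pool import StaticPool
+
+        engine = create_engine("sqlite://", poolclass=StaticPool)
+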
+ """
+
+ @util.memoized_property
+ def connection(self):
+ return _ConnectionRecord(self)
+
+ def status(self):
+ return "StaticPool"
+
+ def dispose(self):
+ if (
+ "connection" in self.__dict__
+ and self.connection.dbapi_connection is not None
+ ):
+ self.connection.close()
+ del self.__dict__["connection"]
+
+ def recreate(self):
+ self.logger.info("Pool recreating")
+ return self.__class__(
+ creator=self._creator,
+ recycle=self._recycle,
+ reset_on_return=self._reset_on_return,
+ pre_ping=self._pre_ping,
+ echo=self.echo,
+ logging_name=self._orig_logging_name,
+ _dispatch=self.dispatch,
+ dialect=self._dialect,
+ )
+
+ def _transfer_from(self, other_static_pool):
+ # used by the test suite to make a new engine / pool without
+ # losing the state of an existing SQLite :memory: connection
+ self._invoke_creator = (
+ lambda crec: other_static_pool.connection.dbapi_connection
+ )
+
+ def _create_connection(self):
+ raise NotImplementedError()
+
+ def _do_return_conn(self, conn):
+ pass
+
+ def _do_get(self):
+ rec = self.connection
+ if rec._is_hard_or_soft_invalidated():
+ del self.__dict__["connection"]
+ rec = self.connection
+
+ return rec
+
+
+class AssertionPool(Pool):
+
+ """A :class:`_pool.Pool` that allows at most one checked out connection at
+ any given time.
+
+ This will raise an exception if more than one connection is checked out
+ at a time. Useful for debugging code that is using more connections
+ than desired.
+
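+    E.g., a debugging sketch::
+
+        from sqlalchemy import create_engine
+        from sqlalchemy.pool import AssertionPool
+
+        engine = create_engine("sqlite://", poolclass=AssertionPool)
+
+        c1 = engine.connect()
+        c2 = engine.connect()  # raises AssertionError
+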
+ """
+
+ def __init__(self, *args, **kw):
+ self._conn = None
+ self._checked_out = False
+ self._store_traceback = kw.pop("store_traceback", True)
+ self._checkout_traceback = None
+ Pool.__init__(self, *args, **kw)
+
+ def status(self):
+ return "AssertionPool"
+
+ def _do_return_conn(self, conn):
+ if not self._checked_out:
+ raise AssertionError("connection is not checked out")
+ self._checked_out = False
+ assert conn is self._conn
+
+ def dispose(self):
+ self._checked_out = False
+ if self._conn:
+ self._conn.close()
+
+ def recreate(self):
+ self.logger.info("Pool recreating")
+ return self.__class__(
+ self._creator,
+ echo=self.echo,
+ pre_ping=self._pre_ping,
+ recycle=self._recycle,
+ reset_on_return=self._reset_on_return,
+ logging_name=self._orig_logging_name,
+ _dispatch=self.dispatch,
+ dialect=self._dialect,
+ )
+
+ def _do_get(self):
+ if self._checked_out:
+ if self._checkout_traceback:
+ suffix = " at:\n%s" % "".join(
+ chop_traceback(self._checkout_traceback)
+ )
+ else:
+ suffix = ""
+ raise AssertionError("connection is already checked out" + suffix)
+
+ if not self._conn:
+ self._conn = self._create_connection()
+
+ self._checked_out = True
+ if self._store_traceback:
+ self._checkout_traceback = traceback.format_stack()
+ return self._conn