diff --git a/README.md b/README.md
index f9818d0..faf698d 100644
--- a/README.md
+++ b/README.md
@@ -24,6 +24,7 @@ Environment variables
 | `TENANT_HEADER` | `` | The name of the HTTP header which contains the tenant name for multi-tenant setups. |
 | `TENANT_PATH_PREFIX` | `@service_prefix@/@tenant@` | URL path prefix for all QWC services for multi-tenant setups. |
 | `TENANT_ACCESS_COOKIE_PATH` | `` | Path for which the access cookie is valid for multi-tenant setups. |
+| `ENABLE_POOLING` | `False` | Enable DB connection pooling. Defaults are used if the following environment variables are not set. |
 | `POOL_SIZE` | `5` | Maximum number of possible data base connections. |
 | `MAX_OVERFLOW` | `10` | Additional connections beyond pool_size during peak load. |
 | `POOL_TIMEOUT` | `30` | Time (in seconds) to wait for a connection to become available. |
diff --git a/qwc_services_core/database.py b/qwc_services_core/database.py
index 583d8d1..ed4ef1f 100644
--- a/qwc_services_core/database.py
+++ b/qwc_services_core/database.py
@@ -18,21 +18,26 @@ def db_engine(self, conn_str):
         """
         see https://docs.sqlalchemy.org/en/latest/core/engines.html#postgresql
         """
-        db_pool_size = os.environ.get('POOL_SIZE', 5)
-        db_max_overflow = os.environ.get('MAX_OVERFLOW', 10)
-        db_pool_timeout = os.environ.get('POOL_TIMEOUT', 30)
-        db_pool_recycle = os.environ.get('POOL_RECYCLE', -1)
+        db_enable_pooling = os.environ.get('ENABLE_POOLING', 'False').lower() in ('t', 'true')
+        db_pool_size = int(os.environ.get('POOL_SIZE', 5))
+        db_max_overflow = int(os.environ.get('MAX_OVERFLOW', 10))
+        db_pool_timeout = int(os.environ.get('POOL_TIMEOUT', 30))
+        db_pool_recycle = int(os.environ.get('POOL_RECYCLE', -1))
 
         engine = self.engines.get(conn_str)
         if not engine:
-            engine = create_engine(
-                conn_str,
-                poolclass=QueuePool,
-                pool_size=db_pool_size,
-                max_overflow=db_max_overflow,
-                pool_timeout=db_pool_timeout,
-                pool_recycle=db_pool_recycle,
-                pool_pre_ping=True, echo=False)
+            if db_enable_pooling:
+                engine = create_engine(
+                    conn_str,
+                    poolclass=QueuePool,
+                    pool_size=db_pool_size,
+                    max_overflow=db_max_overflow,
+                    pool_timeout=db_pool_timeout,
+                    pool_recycle=db_pool_recycle,
+                    pool_pre_ping=True, echo=False)
+            else:
+                engine = create_engine(
+                    conn_str, pool_pre_ping=True, echo=False)
             self.engines[conn_str] = engine
 
         return engine
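
For reference, a minimal usage sketch of the new `ENABLE_POOLING` switch (not part of the patch): it assumes the `DatabaseEngine` class in `qwc_services_core.database` and a placeholder PostgreSQL service connection string; adjust both to your deployment.

```python
# Minimal sketch, assuming the `DatabaseEngine` wrapper around db_engine()
# in qwc_services_core/database.py and an example connection string
# (both assumptions, not part of the patch).
import os

from qwc_services_core.database import DatabaseEngine

# Pooling configuration; in a deployment these normally come from the
# container environment rather than being set in code.
os.environ['ENABLE_POOLING'] = 'true'   # opt in to QueuePool
os.environ['POOL_SIZE'] = '5'           # base number of pooled connections
os.environ['MAX_OVERFLOW'] = '10'       # extra connections under peak load
os.environ['POOL_TIMEOUT'] = '30'       # seconds to wait for a free connection
os.environ['POOL_RECYCLE'] = '1800'     # recycle connections after 30 minutes

db_engine = DatabaseEngine()
# Repeated calls with the same connection string reuse the cached engine.
engine = db_engine.db_engine('postgresql:///?service=qwc_configdb')
with engine.connect() as connection:
    pass  # run queries against a pooled connection here
```

If `ENABLE_POOLING` is unset or false, the tuning variables are ignored and the engine is created with SQLAlchemy's default pool settings (still with `pool_pre_ping=True`).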