From 02c4ccd44398e7d285eef7adc8ffb1663715ac4b Mon Sep 17 00:00:00 2001 From: Cody Fincher Date: Tue, 11 Nov 2025 00:37:34 +0000 Subject: [PATCH 01/10] feat(stack): implement StatementStack and StackOperation for managing SQL execution sequences feat(exceptions): add StackExecutionError for handling stack operation failures feat(driver): integrate stack execution in async and sync drivers test: add unit tests for StatementStack and StackResult functionalities --- sqlspec/__init__.py | 8 ++ sqlspec/core/__init__.py | 6 +- sqlspec/core/result.py | 81 ++++++++++- sqlspec/core/stack.py | 162 ++++++++++++++++++++++ sqlspec/driver/_async.py | 98 ++++++++++++- sqlspec/driver/_common.py | 40 ++++++ sqlspec/driver/_sync.py | 107 +++++++++++++- sqlspec/exceptions.py | 38 +++++ sqlspec/protocols.py | 17 ++- tests/unit/test_core/test_result.py | 29 +++- tests/unit/test_core/test_stack.py | 97 +++++++++++++ tests/unit/test_driver/test_stack_base.py | 121 ++++++++++++++++ tests/unit/test_exceptions.py | 19 +++ 13 files changed, 814 insertions(+), 9 deletions(-) create mode 100644 sqlspec/core/stack.py create mode 100644 tests/unit/test_core/test_stack.py create mode 100644 tests/unit/test_driver/test_stack_base.py diff --git a/sqlspec/__init__.py b/sqlspec/__init__.py index 1267682ed..626c30d96 100644 --- a/sqlspec/__init__.py +++ b/sqlspec/__init__.py @@ -29,11 +29,15 @@ ParameterStyle, ParameterStyleConfig, SQLResult, + StackOperation, + StackResult, Statement, StatementConfig, + StatementStack, ) from sqlspec.core import filters as filters from sqlspec.driver import AsyncDriverAdapterBase, ExecutionResult, SyncDriverAdapterBase +from sqlspec.exceptions import StackExecutionError from sqlspec.loader import SQLFile, SQLFileLoader from sqlspec.typing import ConnectionT, PoolT, SchemaT, StatementParameters, SupportedSchemaModel from sqlspec.utils.logging import suppress_erroneous_sqlglot_log_messages @@ -70,9 +74,13 @@ "SQLSpec", "SchemaT", "Select", + "StackExecutionError", + "StackOperation", + "StackResult", "Statement", "StatementConfig", "StatementParameters", + "StatementStack", "SupportedSchemaModel", "SyncDatabaseConfig", "SyncDriverAdapterBase", diff --git a/sqlspec/core/__init__.py b/sqlspec/core/__init__.py index 1921db7a1..be7ff403a 100644 --- a/sqlspec/core/__init__.py +++ b/sqlspec/core/__init__.py @@ -171,7 +171,8 @@ validate_parameter_alignment, wrap_with_type, ) -from sqlspec.core.result import ArrowResult, SQLResult, StatementResult, create_arrow_result, create_sql_result +from sqlspec.core.result import ArrowResult, SQLResult, StackResult, StatementResult, create_arrow_result, create_sql_result +from sqlspec.core.stack import StackOperation, StatementStack from sqlspec.core.splitter import split_sql_script from sqlspec.core.statement import ( SQL, @@ -230,6 +231,9 @@ "ProcessedState", "SQLProcessor", "SQLResult", + "StackOperation", + "StackResult", + "StatementStack", "SearchFilter", "Statement", "StatementConfig", diff --git a/sqlspec/core/result.py b/sqlspec/core/result.py index 4745e9df8..8566bd1de 100644 --- a/sqlspec/core/result.py +++ b/sqlspec/core/result.py @@ -33,7 +33,7 @@ from sqlspec.typing import ArrowTable, PandasDataFrame, PolarsDataFrame, SchemaT -__all__ = ("ArrowResult", "SQLResult", "StatementResult") +__all__ = ("ArrowResult", "SQLResult", "StackResult", "StatementResult") T = TypeVar("T") @@ -875,6 +875,85 @@ def __iter__(self) -> "Iterator[dict[str, Any]]": yield from self.data.to_pylist() +class StackResult: + """Concrete stack result wrapper that preserves the 
original driver result.""" + + __slots__ = ("error", "metadata", "raw_result", "rowcount", "warning") + + def __init__( + self, + raw_result: "StatementResult | ArrowResult | None" = None, + *, + rowcount: int | None = None, + error: Exception | None = None, + warning: Any | None = None, + metadata: "dict[str, Any] | None" = None, + ) -> None: + self.raw_result = raw_result + self.rowcount = rowcount if rowcount is not None else _infer_rowcount(raw_result) + self.error = error + self.warning = warning + self.metadata = dict(metadata) if metadata else None + + def __iter__(self) -> "Iterator[Any]": + yield from self.rows + + @property + def rows(self) -> "tuple[Any, ...]": + """Return cached rows from the underlying result when available.""" + + if self.raw_result is None: + return () + try: + return tuple(self.raw_result) + except TypeError: # pragma: no cover - defensive fallback + return () + + def is_error(self) -> bool: + """Return True when the stack operation captured an error.""" + + return self.error is not None + + def with_error(self, error: Exception) -> "StackResult": + """Return a copy of the result that records the provided error.""" + + return StackResult( + raw_result=self.raw_result, + rowcount=self.rowcount, + warning=self.warning, + metadata=self.metadata, + error=error, + ) + + @classmethod + def from_sql_result(cls, result: "SQLResult") -> "StackResult": + """Convert a standard SQLResult into a stack-friendly representation.""" + + metadata = dict(result.metadata) if result.metadata else None + warning = metadata.get("warning") if metadata else None + return cls(raw_result=result, rowcount=result.rows_affected, warning=warning, metadata=metadata) + + @classmethod + def from_arrow_result(cls, result: "ArrowResult") -> "StackResult": + """Create a stack result from an ArrowResult instance.""" + + metadata = dict(result.metadata) if result.metadata else None + return cls(raw_result=result, rowcount=result.rows_affected, metadata=metadata) + + @classmethod + def from_error(cls, error: Exception) -> "StackResult": + """Create an error-only stack result.""" + + return cls(raw_result=None, rowcount=0, error=error) + + +def _infer_rowcount(result: "StatementResult | ArrowResult | None") -> int: + if result is None: + return 0 + rowcount = getattr(result, "rows_affected", None) + return int(rowcount) if isinstance(rowcount, int) else 0 + + def create_sql_result( statement: "SQL", data: list[dict[str, Any]] | None = None, diff --git a/sqlspec/core/stack.py b/sqlspec/core/stack.py new file mode 100644 index 000000000..0fc76dbd9 --- /dev/null +++ b/sqlspec/core/stack.py @@ -0,0 +1,162 @@ +"""Immutable builder utilities for multi-statement execution stacks.""" + +from collections.abc import Iterator, Mapping, Sequence +from types import MappingProxyType +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: # pragma: no cover + from sqlspec.builder import QueryBuilder + from sqlspec.core.statement import Statement, StatementConfig, StatementFilter + from sqlspec.typing import StatementParameters +__all__ = ("StackOperation", "StatementStack") + + +ALLOWED_METHODS: tuple[str, ...] = ("execute", "execute_many", "execute_script", "execute_arrow") + + +class StackOperation: + """Single SQL operation captured inside a statement stack.""" + + __slots__ = ("arguments", "keyword_arguments", "method", "statement") + + def __init__( + self, + method: str, + statement: Any, + arguments: "tuple[Any, ...] 
| None" = None, + keyword_arguments: "Mapping[str, Any] | None" = None, + ) -> None: + if method not in ALLOWED_METHODS: + msg = f"Unsupported stack method {method!r}" + raise ValueError(msg) + self.method = method + self.statement = statement + self.arguments = arguments if arguments is not None else () + self.keyword_arguments = keyword_arguments + + +class StatementStack: + """Immutable builder that preserves ordered SQL operations.""" + + __slots__ = ("_operations",) + + def __init__(self, operations: "tuple[StackOperation, ...] | None" = None) -> None: + self._operations = operations if operations is not None else () + + def __iter__(self) -> "Iterator[StackOperation]": + return iter(self._operations) + + def __len__(self) -> int: # pragma: no cover - trivial + return len(self._operations) + + def __bool__(self) -> bool: # pragma: no cover - trivial + return bool(self._operations) + + def __repr__(self) -> str: + return f"StatementStack(size={len(self._operations)})" + + @property + def operations(self) -> "tuple[StackOperation, ...]": + return self._operations + + def push_execute( + self, + statement: "str | Statement | QueryBuilder", + /, + *parameters: "StatementParameters | StatementFilter", + statement_config: "StatementConfig | None" = None, + **kwargs: Any, + ) -> "StatementStack": + normalized_statement = _validate_statement(statement) + frozen_kwargs = _freeze_kwargs(kwargs, statement_config) + operation = StackOperation("execute", normalized_statement, tuple(parameters), frozen_kwargs) + return self._append(operation) + + def push_execute_many( + self, + statement: "str | Statement | QueryBuilder", + parameter_sets: "Sequence[StatementParameters]", + /, + *filters: "StatementParameters | StatementFilter", + statement_config: "StatementConfig | None" = None, + **kwargs: Any, + ) -> "StatementStack": + normalized_statement = _validate_statement(statement) + _validate_execute_many_payload(parameter_sets) + normalized_sets = tuple(parameter_sets) + arguments = (normalized_sets, *filters) + frozen_kwargs = _freeze_kwargs(kwargs, statement_config) + operation = StackOperation("execute_many", normalized_statement, tuple(arguments), frozen_kwargs) + return self._append(operation) + + def push_execute_script( + self, + statement: "str | Statement", + /, + *parameters: "StatementParameters | StatementFilter", + statement_config: "StatementConfig | None" = None, + **kwargs: Any, + ) -> "StatementStack": + normalized_statement = _validate_statement(statement) + frozen_kwargs = _freeze_kwargs(kwargs, statement_config) + operation = StackOperation("execute_script", normalized_statement, tuple(parameters), frozen_kwargs) + return self._append(operation) + + def push_execute_arrow( + self, + statement: "str | Statement | QueryBuilder", + /, + *parameters: "StatementParameters | StatementFilter", + statement_config: "StatementConfig | None" = None, + **kwargs: Any, + ) -> "StatementStack": + normalized_statement = _validate_statement(statement) + frozen_kwargs = _freeze_kwargs(kwargs, statement_config) + operation = StackOperation("execute_arrow", normalized_statement, tuple(parameters), frozen_kwargs) + return self._append(operation) + + def extend(self, *stacks: "StatementStack") -> "StatementStack": + operations = list(self._operations) + for stack in stacks: + operations.extend(stack._operations) + return StatementStack(tuple(operations)) + + @classmethod + def from_operations(cls, operations: "Sequence[StackOperation] | None" = None) -> "StatementStack": + if not operations: + return 
cls() + return cls(tuple(operations)) + + def _append(self, operation: StackOperation) -> "StatementStack": + return StatementStack((*self._operations, operation)) + + +def _validate_statement(statement: Any) -> Any: + if isinstance(statement, StatementStack): + msg = "Nested StatementStack instances are not supported" + raise TypeError(msg) + if isinstance(statement, str): + stripped = statement.strip() + if not stripped: + msg = "Stack statements require non-empty SQL strings" + raise ValueError(msg) + return statement + return statement + + +def _validate_execute_many_payload(parameter_sets: Any) -> None: + if not isinstance(parameter_sets, Sequence) or isinstance(parameter_sets, (str, bytes, bytearray)): + msg = "execute_many payload must be a sequence of parameter sets" + raise TypeError(msg) + if not parameter_sets: + msg = "execute_many payload cannot be empty" + raise ValueError(msg) + + +def _freeze_kwargs(kwargs: "dict[str, Any]", statement_config: "StatementConfig | None") -> "Mapping[str, Any] | None": + if not kwargs and statement_config is None: + return None + payload = dict(kwargs) + if statement_config is not None: + payload["statement_config"] = statement_config + return MappingProxyType(payload) diff --git a/sqlspec/driver/_async.py b/sqlspec/driver/_async.py index d6103c203..d389b7746 100644 --- a/sqlspec/driver/_async.py +++ b/sqlspec/driver/_async.py @@ -4,16 +4,18 @@ from time import perf_counter from typing import TYPE_CHECKING, Any, Final, TypeVar, overload -from sqlspec.core import SQL, Statement, create_arrow_result +from sqlspec.core import SQL, StackResult, Statement, create_arrow_result +from sqlspec.core.stack import StackOperation, StatementStack from sqlspec.driver._common import ( CommonDriverAttributesMixin, DataDictionaryMixin, ExecutionResult, VersionInfo, + describe_stack_statement, handle_single_row_error, ) from sqlspec.driver.mixins import SQLTranslatorMixin, StorageDriverMixin -from sqlspec.exceptions import ImproperConfigurationError +from sqlspec.exceptions import ImproperConfigurationError, StackExecutionError from sqlspec.utils.arrow_helpers import convert_dict_to_arrow from sqlspec.utils.logging import get_logger from sqlspec.utils.module_loader import ensure_pyarrow @@ -187,6 +189,75 @@ async def _execute_script(self, cursor: Any, statement: "SQL") -> ExecutionResul cursor, statement_count=statement_count, successful_statements=successful_count, is_script_result=True ) + async def execute_stack( + self, stack: "StatementStack", *, continue_on_error: bool = False + ) -> "tuple[StackResult, ...]": + """Execute a StatementStack sequentially using the adapter's primitives.""" + + if not isinstance(stack, StatementStack): + msg = "execute_stack expects a StatementStack instance" + raise TypeError(msg) + if not stack: + msg = "Cannot execute an empty StatementStack" + raise ValueError(msg) + + results: list[StackResult] = [] + started_transaction = False + single_transaction = not continue_on_error + mode_label = "continue-on-error" if continue_on_error else "fail-fast" + logger.debug( + "Executing statement stack: driver=%s size=%s mode=%s in_tx=%s", + type(self).__name__, + len(stack.operations), + mode_label, + self._connection_in_transaction(), + ) + + try: + if single_transaction and not self._connection_in_transaction(): + await self.begin() + started_transaction = True + + for index, operation in enumerate(stack.operations): + try: + raw_result = await self._execute_stack_operation(operation) + except Exception as exc: # pragma: no cover - 
exercised via tests + stack_error = StackExecutionError( + index, + describe_stack_statement(operation.statement), + exc, + adapter=type(self).__name__, + mode="continue-on-error" if continue_on_error else "fail-fast", + ) + + if started_transaction and not continue_on_error: + try: + await self.rollback() + except Exception as rollback_error: # pragma: no cover - diagnostics only + logger.debug("Rollback after stack failure failed: %s", rollback_error) + started_transaction = False + + if continue_on_error: + logger.debug("Stack operation %s failed in continue-on-error mode: %s", index, exc) + results.append(StackResult.from_error(stack_error)) + continue + + raise stack_error from exc + + results.append(StackResult(raw_result=raw_result)) + + if started_transaction: + await self.commit() + except Exception: + if started_transaction: + try: + await self.rollback() + except Exception as rollback_error: # pragma: no cover - diagnostics only + logger.debug("Rollback after stack failure failed: %s", rollback_error) + raise + + return tuple(results) + @abstractmethod async def _execute_many(self, cursor: Any, statement: "SQL") -> ExecutionResult: """Execute SQL with multiple parameter sets (executemany). @@ -554,6 +625,29 @@ async def select_with_total( return (select_result.get_data(schema_type=schema_type), count_result.scalar()) + async def _execute_stack_operation(self, operation: "StackOperation") -> "SQLResult | ArrowResult | None": + kwargs = dict(operation.keyword_arguments) if operation.keyword_arguments else {} + + if operation.method == "execute": + return await self.execute(operation.statement, *operation.arguments, **kwargs) + + if operation.method == "execute_many": + if not operation.arguments: + msg = "execute_many stack operation requires parameter sets" + raise ValueError(msg) + parameter_sets = operation.arguments[0] + filters = operation.arguments[1:] + return await self.execute_many(operation.statement, parameter_sets, *filters, **kwargs) + + if operation.method == "execute_script": + return await self.execute_script(operation.statement, *operation.arguments, **kwargs) + + if operation.method == "execute_arrow": + return await self.select_to_arrow(operation.statement, *operation.arguments, **kwargs) + + msg = f"Unsupported stack operation method: {operation.method}" + raise ValueError(msg) + class AsyncDataDictionaryBase(DataDictionaryMixin): """Base class for asynchronous data dictionary implementations.""" diff --git a/sqlspec/driver/_common.py b/sqlspec/driver/_common.py index 3aa16e508..ef145c04e 100644 --- a/sqlspec/driver/_common.py +++ b/sqlspec/driver/_common.py @@ -42,6 +42,7 @@ "ExecutionResult", "ScriptExecutionResult", "VersionInfo", + "describe_stack_statement", "handle_single_row_error", "make_cache_key_hashable", ) @@ -107,6 +108,20 @@ def make_cache_key_hashable(obj: Any) -> Any: return obj +def describe_stack_statement(statement: Any) -> str: + """Return a readable representation of a stack statement for diagnostics.""" + + if isinstance(statement, str): + return statement + raw_sql = getattr(statement, "raw_sql", None) + if isinstance(raw_sql, str): + return raw_sql + sql_attr = getattr(statement, "sql", None) + if isinstance(sql_attr, str): + return sql_attr + return repr(statement) + + def handle_single_row_error(error: ValueError) -> "NoReturn": """Normalize single-row selection errors to SQLSpec exceptions.""" @@ -518,6 +533,31 @@ def prepare_statement( return sql_statement + def _connection_in_transaction(self) -> bool: + """Best-effort detection of 
whether the underlying connection is inside a transaction.""" + + connection = getattr(self, "connection", None) + if connection is None: + return False + + indicator = getattr(connection, "in_transaction", None) + if isinstance(indicator, bool): + return indicator + + checker = getattr(connection, "is_in_transaction", None) + if callable(checker): + try: + return bool(checker()) + except Exception: # pragma: no cover - driver-specific edge cases + return False + + status = getattr(connection, "transaction_status", None) + if isinstance(status, str): + lowered = status.lower() + return "idle" not in lowered + + return False + def split_script_statements( self, script: str, statement_config: "StatementConfig", strip_trailing_semicolon: bool = False ) -> list[str]: diff --git a/sqlspec/driver/_sync.py b/sqlspec/driver/_sync.py index b349aeece..9e482e557 100644 --- a/sqlspec/driver/_sync.py +++ b/sqlspec/driver/_sync.py @@ -4,16 +4,18 @@ from time import perf_counter from typing import TYPE_CHECKING, Any, Final, TypeVar, overload -from sqlspec.core import SQL, create_arrow_result +from sqlspec.core import SQL, StackResult, create_arrow_result +from sqlspec.core.stack import StackOperation, StatementStack from sqlspec.driver._common import ( CommonDriverAttributesMixin, DataDictionaryMixin, ExecutionResult, VersionInfo, + describe_stack_statement, handle_single_row_error, ) from sqlspec.driver.mixins import SQLTranslatorMixin, StorageDriverMixin -from sqlspec.exceptions import ImproperConfigurationError +from sqlspec.exceptions import ImproperConfigurationError, StackExecutionError from sqlspec.utils.arrow_helpers import convert_dict_to_arrow from sqlspec.utils.logging import get_logger from sqlspec.utils.module_loader import ensure_pyarrow @@ -181,12 +183,88 @@ def _execute_script(self, cursor: Any, statement: "SQL") -> ExecutionResult: for stmt in statements: single_stmt = statement.copy(statement=stmt, parameters=prepared_parameters) self._execute_statement(cursor, single_stmt) - successful_count += 1 + successful_count += 1 return self.create_execution_result( cursor, statement_count=statement_count, successful_statements=successful_count, is_script_result=True ) + def execute_stack( + self, + stack: "StatementStack", + *, + continue_on_error: bool = False, + ) -> "tuple[StackResult, ...]": + """Execute a StatementStack sequentially using the adapter's primitives.""" + + if not isinstance(stack, StatementStack): + msg = "execute_stack expects a StatementStack instance" + raise TypeError(msg) + if not stack: + msg = "Cannot execute an empty StatementStack" + raise ValueError(msg) + + results: list[StackResult] = [] + started_transaction = False + single_transaction = not continue_on_error + mode_label = "continue-on-error" if continue_on_error else "fail-fast" + logger.debug( + "Executing statement stack: driver=%s size=%s mode=%s in_tx=%s", + type(self).__name__, + len(stack.operations), + mode_label, + self._connection_in_transaction(), + ) + + try: + if single_transaction and not self._connection_in_transaction(): + self.begin() + started_transaction = True + + for index, operation in enumerate(stack.operations): + try: + raw_result = self._execute_stack_operation(operation) + except Exception as exc: # pragma: no cover - exercised via tests + stack_error = StackExecutionError( + index, + describe_stack_statement(operation.statement), + exc, + adapter=type(self).__name__, + mode="continue-on-error" if continue_on_error else "fail-fast", + ) + + if started_transaction and not 
continue_on_error: + try: + self.rollback() + except Exception as rollback_error: # pragma: no cover - diagnostics only + logger.debug("Rollback after stack failure failed: %s", rollback_error) + started_transaction = False + + if continue_on_error: + logger.debug( + "Stack operation %s failed in continue-on-error mode: %s", + index, + exc, + ) + results.append(StackResult.from_error(stack_error)) + continue + + raise stack_error from exc + + results.append(StackResult(raw_result=raw_result)) + + if started_transaction: + self.commit() + except Exception: + if started_transaction: + try: + self.rollback() + except Exception as rollback_error: # pragma: no cover - diagnostics only + logger.debug("Rollback after stack failure failed: %s", rollback_error) + raise + + return tuple(results) + @abstractmethod def _execute_many(self, cursor: Any, statement: "SQL") -> ExecutionResult: """Execute SQL with multiple parameter sets (executemany). @@ -556,6 +634,29 @@ def select_with_total( return (select_result.get_data(schema_type=schema_type), count_result.scalar()) + def _execute_stack_operation(self, operation: "StackOperation") -> "SQLResult | ArrowResult | None": + kwargs = dict(operation.keyword_arguments) if operation.keyword_arguments else {} + + if operation.method == "execute": + return self.execute(operation.statement, *operation.arguments, **kwargs) + + if operation.method == "execute_many": + if not operation.arguments: + msg = "execute_many stack operation requires parameter sets" + raise ValueError(msg) + parameter_sets = operation.arguments[0] + filters = operation.arguments[1:] + return self.execute_many(operation.statement, parameter_sets, *filters, **kwargs) + + if operation.method == "execute_script": + return self.execute_script(operation.statement, *operation.arguments, **kwargs) + + if operation.method == "execute_arrow": + return self.select_to_arrow(operation.statement, *operation.arguments, **kwargs) + + msg = f"Unsupported stack operation method: {operation.method}" + raise ValueError(msg) + class SyncDataDictionaryBase(DataDictionaryMixin): """Base class for synchronous data dictionary implementations.""" diff --git a/sqlspec/exceptions.py b/sqlspec/exceptions.py index 2ebfa53a8..753b7850c 100644 --- a/sqlspec/exceptions.py +++ b/sqlspec/exceptions.py @@ -27,6 +27,7 @@ "SQLFileParseError", "SQLParsingError", "SQLSpecError", + "StackExecutionError", "SerializationError", "StorageCapabilityError", "StorageOperationFailedError", @@ -170,6 +171,43 @@ class DataError(SQLSpecError): """Invalid data type or format for database operation.""" +class StackExecutionError(SQLSpecError): + """Raised when a statement stack operation fails.""" + + def __init__( + self, + operation_index: int, + sql: str, + original_error: Exception, + *, + adapter: str | None = None, + mode: str = "fail-fast", + native_pipeline: bool | None = None, + downgrade_reason: str | None = None, + ) -> None: + pipeline_state = "enabled" if native_pipeline else "disabled" + adapter_label = adapter or "unknown-adapter" + preview = " ".join(sql.strip().split()) + if len(preview) > 120: + preview = f"{preview[:117]}..." 
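+        # Example rendering: "Stack operation 2 failed on asyncpg (mode=fail-fast, pipeline=disabled) sql=SELECT ..."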
+ detail = ( + f"Stack operation {operation_index} failed on {adapter_label} " + f"(mode={mode}, pipeline={pipeline_state}) sql={preview}" + ) + super().__init__(detail) + self.operation_index = operation_index + self.sql = sql + self.original_error = original_error + self.adapter = adapter + self.mode = mode + self.native_pipeline = native_pipeline + self.downgrade_reason = downgrade_reason + + def __str__(self) -> str: + base = super().__str__() + return f"{base}: {self.original_error}" if self.original_error else base + + class OperationalError(SQLSpecError): """Operational database error (timeout, disk full, resource limit).""" diff --git a/sqlspec/protocols.py b/sqlspec/protocols.py index 413212241..0b5886f35 100644 --- a/sqlspec/protocols.py +++ b/sqlspec/protocols.py @@ -4,7 +4,7 @@ and runtime isinstance() checks. """ -from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable +from typing import TYPE_CHECKING, Any, Mapping, Protocol, Sequence, runtime_checkable from typing_extensions import Self @@ -39,6 +39,7 @@ "ParameterValueProtocol", "SQLBuilderProtocol", "SelectBuilderProtocol", + "StackResultProtocol", "SupportsArrowResults", "WithMethodProtocol", ) @@ -480,3 +481,17 @@ def select_to_arrow( ArrowResult containing Arrow data. """ ... +@runtime_checkable +class StackResultProtocol(Protocol): + """Protocol describing stack execution results.""" + + raw_result: Any + rowcount: int + error: Exception | None + warning: Any | None + metadata: Mapping[str, Any] | None + + @property + def rows(self) -> Sequence[Any]: ... + + def is_error(self) -> bool: ... diff --git a/tests/unit/test_core/test_result.py b/tests/unit/test_core/test_result.py index ce913784d..4d2419dcd 100644 --- a/tests/unit/test_core/test_result.py +++ b/tests/unit/test_core/test_result.py @@ -4,7 +4,7 @@ import pytest -from sqlspec.core import SQL, SQLResult, create_sql_result +from sqlspec.core import SQL, SQLResult, StackResult, create_sql_result pytestmark = pytest.mark.xdist_group("core") @@ -200,6 +200,33 @@ class UserDict(TypedDict): assert users[1]["name"] == "Bob" +def test_stack_result_from_sql_result() -> None: + sql_stmt = SQL("SELECT * FROM users") + sql_result = SQLResult(statement=sql_stmt, data=[{"id": 1}], rows_affected=1, metadata={"warning": "slow"}) + + stack_result = StackResult.from_sql_result(sql_result) + + assert stack_result.rowcount == 1 + assert stack_result.warning == "slow" + assert stack_result.raw_result is sql_result + assert list(stack_result.rows) == [{"id": 1}] + + +def test_stack_result_with_error_and_factory() -> None: + sql_stmt = SQL("SELECT 1") + sql_result = SQLResult(statement=sql_stmt, data=[{"value": 1}], rows_affected=1) + stack_result = StackResult(raw_result=sql_result) + + updated = stack_result.with_error(ValueError("boom")) + assert updated.error is not None + assert updated.raw_result is sql_result + assert updated.rows == stack_result.rows + + failure = StackResult.from_error(RuntimeError("stack")) + assert failure.is_error() + assert list(failure) == [] + + def test_sql_result_all_with_schema_type() -> None: """Test SQLResult.all() with schema_type parameter.""" from dataclasses import dataclass diff --git a/tests/unit/test_core/test_stack.py b/tests/unit/test_core/test_stack.py new file mode 100644 index 000000000..abaf9cc23 --- /dev/null +++ b/tests/unit/test_core/test_stack.py @@ -0,0 +1,97 @@ +"""Tests for the StatementStack builder utilities.""" + +from typing import Any + +import pytest + +from sqlspec.core import StackOperation, StatementStack + 
+pytestmark = pytest.mark.xdist_group("core") + + +def test_push_execute_is_immutable() -> None: + stack = StatementStack() + new_stack = stack.push_execute("SELECT 1 WHERE id = :id", {"id": 1}) + + assert len(stack) == 0 + assert len(new_stack) == 1 + operation = new_stack.operations[0] + assert operation.method == "execute" + assert operation.statement == "SELECT 1 WHERE id = :id" + assert operation.arguments == ({"id": 1},) + assert stack is not new_stack + + +def test_push_execute_many_validates_payload() -> None: + stack = StatementStack() + with pytest.raises(TypeError, match="sequence of parameter sets"): + stack.push_execute_many("INSERT", "invalid") + with pytest.raises(ValueError, match="cannot be empty"): + stack.push_execute_many("INSERT", []) + + +def test_push_execute_script_requires_non_empty_sql() -> None: + stack = StatementStack() + with pytest.raises(ValueError, match="non-empty SQL"): + stack.push_execute_script(" ") + + +def test_push_execute_many_stores_filters_and_kwargs() -> None: + stack = StatementStack().push_execute_many( + "INSERT", + [{"x": 1}], + {"filter": True}, + statement_config=None, + chunk_size=50, + ) + operation = stack.operations[0] + assert operation.method == "execute_many" + assert operation.arguments[0] == ({"x": 1},) + assert operation.arguments[1] == {"filter": True} + assert operation.keyword_arguments is not None + assert operation.keyword_arguments["chunk_size"] == 50 + + +def test_extend_and_from_operations() -> None: + base = StatementStack().push_execute("SELECT 1") + duplicate = StatementStack.from_operations(base.operations) + merged = base.extend(duplicate) + + assert len(duplicate) == 1 + assert len(merged) == 2 + assert all(isinstance(op, StackOperation) for op in merged) + + +def test_reject_nested_stack() -> None: + stack = StatementStack() + with pytest.raises(TypeError, match="Nested StatementStack"): + stack.push_execute(stack) + + +def test_freeze_kwargs_includes_statement_config() -> None: + class DummyConfig: + pass + + config = DummyConfig() + stack = StatementStack().push_execute("SELECT 1", statement_config=config) + operation = stack.operations[0] + assert operation.keyword_arguments is not None + assert operation.keyword_arguments["statement_config"] is config + + +@pytest.mark.parametrize("statement", ["SELECT 1", object()]) +def test_validate_statement_allows_non_strings(statement: Any) -> None: + stack = StatementStack().push_execute(statement) + assert stack.operations[0].statement is statement + + +def test_push_execute_arrow_records_kwargs() -> None: + stack = StatementStack().push_execute_arrow( + "SELECT * FROM items", {"limit": 10}, return_format="batch", native_only=True + ) + operation = stack.operations[0] + assert operation.method == "execute_arrow" + assert operation.arguments[0] == {"limit": 10} + assert operation.keyword_arguments is not None + assert operation.keyword_arguments["return_format"] == "batch" + assert operation.keyword_arguments["native_only"] is True diff --git a/tests/unit/test_driver/test_stack_base.py b/tests/unit/test_driver/test_stack_base.py new file mode 100644 index 000000000..5fe9a537c --- /dev/null +++ b/tests/unit/test_driver/test_stack_base.py @@ -0,0 +1,121 @@ +"""Unit tests for the driver execute_stack implementations.""" + +import types + +import pytest + +from sqlspec import StatementStack +from sqlspec.exceptions import StackExecutionError + + +@pytest.mark.asyncio +async def test_async_execute_stack_fail_fast_rolls_back(mock_async_driver) -> None: + original_execute = 
mock_async_driver.execute + + async def failing_execute(self, statement, *params, **kwargs): # type: ignore[no-untyped-def] + if isinstance(statement, str) and "FAIL" in statement: + raise ValueError("boom") + return await original_execute(statement, *params, **kwargs) + + mock_async_driver.execute = types.MethodType(failing_execute, mock_async_driver) + + stack = StatementStack().push_execute("INSERT INTO t (id) VALUES (1)").push_execute("FAIL SELECT 1") + + with pytest.raises(StackExecutionError) as excinfo: + await mock_async_driver.execute_stack(stack) + + assert excinfo.value.operation_index == 1 + assert mock_async_driver.connection.in_transaction is False + + +@pytest.mark.asyncio +async def test_async_execute_stack_continue_on_error(mock_async_driver) -> None: + original_execute = mock_async_driver.execute + + async def failing_execute(self, statement, *params, **kwargs): # type: ignore[no-untyped-def] + if isinstance(statement, str) and "FAIL" in statement: + raise ValueError("boom") + return await original_execute(statement, *params, **kwargs) + + mock_async_driver.execute = types.MethodType(failing_execute, mock_async_driver) + + stack = StatementStack().push_execute("INSERT INTO t (id) VALUES (1)").push_execute("FAIL SELECT 1") + + results = await mock_async_driver.execute_stack(stack, continue_on_error=True) + + assert len(results) == 2 + assert results[0].error is None + assert isinstance(results[1].error, StackExecutionError) + assert mock_async_driver.connection.in_transaction is False + + +@pytest.mark.asyncio +async def test_async_execute_stack_execute_arrow(mock_async_driver) -> None: + sentinel = object() + + async def fake_select_to_arrow(self, statement, *params, **kwargs): # type: ignore[no-untyped-def] + return sentinel + + mock_async_driver.select_to_arrow = types.MethodType(fake_select_to_arrow, mock_async_driver) + + stack = StatementStack().push_execute_arrow("SELECT * FROM items") + + results = await mock_async_driver.execute_stack(stack) + + assert len(results) == 1 + assert results[0].raw_result is sentinel + + +def test_sync_execute_stack_fail_fast_rolls_back(mock_sync_driver) -> None: + original_execute = mock_sync_driver.execute + + def failing_execute(self, statement, *params, **kwargs): # type: ignore[no-untyped-def] + if isinstance(statement, str) and "FAIL" in statement: + raise ValueError("boom") + return original_execute(statement, *params, **kwargs) + + mock_sync_driver.execute = types.MethodType(failing_execute, mock_sync_driver) + + stack = StatementStack().push_execute("INSERT INTO t (id) VALUES (1)").push_execute("FAIL SELECT 1") + + with pytest.raises(StackExecutionError) as excinfo: + mock_sync_driver.execute_stack(stack) + + assert excinfo.value.operation_index == 1 + assert mock_sync_driver.connection.in_transaction is False + + +def test_sync_execute_stack_continue_on_error(mock_sync_driver) -> None: + original_execute = mock_sync_driver.execute + + def failing_execute(self, statement, *params, **kwargs): # type: ignore[no-untyped-def] + if isinstance(statement, str) and "FAIL" in statement: + raise ValueError("boom") + return original_execute(statement, *params, **kwargs) + + mock_sync_driver.execute = types.MethodType(failing_execute, mock_sync_driver) + + stack = StatementStack().push_execute("INSERT INTO t (id) VALUES (1)").push_execute("FAIL SELECT 1") + + results = mock_sync_driver.execute_stack(stack, continue_on_error=True) + + assert len(results) == 2 + assert results[0].error is None + assert isinstance(results[1].error, 
StackExecutionError) + assert mock_sync_driver.connection.in_transaction is False + + +def test_sync_execute_stack_execute_arrow(mock_sync_driver) -> None: + sentinel = object() + + def fake_select_to_arrow(self, statement, *params, **kwargs): # type: ignore[no-untyped-def] + return sentinel + + mock_sync_driver.select_to_arrow = types.MethodType(fake_select_to_arrow, mock_sync_driver) + + stack = StatementStack().push_execute_arrow("SELECT * FROM items") + + results = mock_sync_driver.execute_stack(stack) + + assert len(results) == 1 + assert results[0].raw_result is sentinel diff --git a/tests/unit/test_exceptions.py b/tests/unit/test_exceptions.py index 57d593439..82f2d15ab 100644 --- a/tests/unit/test_exceptions.py +++ b/tests/unit/test_exceptions.py @@ -7,6 +7,7 @@ NotNullViolationError, OperationalError, SQLSpecError, + StackExecutionError, TransactionError, UniqueViolationError, ) @@ -42,3 +43,21 @@ def test_exception_chaining() -> None: except UniqueViolationError as exc: assert exc.__cause__ is not None assert isinstance(exc.__cause__, ValueError) + + +def test_stack_execution_error_includes_context() -> None: + base = StackExecutionError( + 2, + "SELECT * FROM users", + ValueError("boom"), + adapter="asyncpg", + mode="continue-on-error", + native_pipeline=False, + downgrade_reason="operator_override", + ) + + detail = str(base) + assert "operation 2" in detail + assert "asyncpg" in detail + assert "pipeline=disabled" in detail + assert "boom" in detail From 9b84e757a8414b9224381c8e6b413bf62acbef15 Mon Sep 17 00:00:00 2001 From: Cody Fincher Date: Tue, 11 Nov 2025 01:17:17 +0000 Subject: [PATCH 02/10] feat(config): reject config classes in validation and add corresponding tests --- sqlspec/utils/config_resolver.py | 6 ++- tests/unit/test_config_resolver.py | 66 ++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 1 deletion(-) diff --git a/sqlspec/utils/config_resolver.py b/sqlspec/utils/config_resolver.py index ff5feb260..59adb2856 100644 --- a/sqlspec/utils/config_resolver.py +++ b/sqlspec/utils/config_resolver.py @@ -133,8 +133,12 @@ def _is_valid_config(config: Any) -> bool: config: Object to validate. Returns: - True if object appears to be a valid config. + True if object is a valid config instance (not a class). """ + # Reject config classes - must be instances + if isinstance(config, type): + return False + nested_config = getattr(config, "config", None) if nested_config is not None and hasattr(nested_config, "migration_config"): return True diff --git a/tests/unit/test_config_resolver.py b/tests/unit/test_config_resolver.py index 3d9dd5546..27225cd89 100644 --- a/tests/unit/test_config_resolver.py +++ b/tests/unit/test_config_resolver.py @@ -165,6 +165,72 @@ def incomplete_config() -> "IncompleteConfig": with pytest.raises(ConfigResolverError, match="returned invalid type"): await resolve_config_async("myapp.config.incomplete_config") + async def test_config_class_rejected(self) -> None: + """Test that config classes (not instances) are rejected. + + Note: This test directly validates that _is_valid_config rejects classes. + When using resolve_config_*, classes are callable and get instantiated, + so they don't reach direct validation as classes. 
+ """ + from sqlspec.utils.config_resolver import _is_valid_config + + class MockConfigClass: + """Mock config class to simulate config classes being passed.""" + + database_url = "sqlite:///test.db" + bind_key = "test" + migration_config: dict[str, Any] = {} + + # Directly test that _is_valid_config rejects classes + assert isinstance(MockConfigClass, type), "Should be a class" + assert not _is_valid_config(MockConfigClass), "Classes should be rejected" + + # But instances should be accepted + instance = MockConfigClass() + assert not isinstance(instance, type), "Should be an instance" + assert _is_valid_config(instance), "Instances should be accepted" + + async def test_config_class_in_list_rejected(self) -> None: + """Test that config classes in a list are rejected.""" + mock_instance = Mock() + mock_instance.database_url = "sqlite:///test.db" + mock_instance.bind_key = "test" + mock_instance.migration_config = {} + + class MockConfigClass: + """Mock config class.""" + + database_url = "sqlite:///test.db" + bind_key = "test" + migration_config: dict[str, Any] = {} + + def mixed_list() -> list[Any]: + return [mock_instance, MockConfigClass] # Class, not instance + + with patch("sqlspec.utils.config_resolver.import_string", return_value=mixed_list): + with pytest.raises(ConfigResolverError, match="returned invalid config at index"): + await resolve_config_async("myapp.config.mixed_list") + + async def test_config_instance_accepted(self) -> None: + """Test that config instances (not classes) are accepted.""" + + class MockConfigClass: + """Mock config class.""" + + def __init__(self) -> None: + self.database_url = "sqlite:///test.db" + self.bind_key = "test" + self.migration_config: dict[str, Any] = {} + + # Pass an instance, not the class + mock_instance = MockConfigClass() + + with patch("sqlspec.utils.config_resolver.import_string", return_value=mock_instance): + result = await resolve_config_async("myapp.config.config_instance") + assert hasattr(result, "database_url") + assert hasattr(result, "bind_key") + assert hasattr(result, "migration_config") + class TestConfigResolverSync: """Test the synchronous wrapper for config resolver.""" From c051a4c898130026093fd206b2e210ae82108e25 Mon Sep 17 00:00:00 2001 From: Cody Fincher Date: Tue, 11 Nov 2025 21:57:45 +0000 Subject: [PATCH 03/10] feat: add StatementStack tests and enhance pipeline execution across adapters - Implement tests for StatementStack sequential execution and continue-on-error behavior in BigQuery, DuckDB, Oracle, Psqlpy, Psycopg, and SQLite adapters. - Introduce edge case tests for StatementStack execution in SQLite, ensuring proper handling of empty stacks and mixed operations. - Enhance OracleAsyncDriver with pipeline support tests, verifying native pipeline execution and error handling. - Add metrics tracking for stack execution to monitor performance and error rates. - Create a utility script to run pre-commit hooks without requiring PTY support. 
--- .gitignore | 1 + .pre-commit-config.yaml | 1 + AGENTS.md | 19 + docs/changelog.rst | 8 + docs/examples/index.rst | 3 + docs/examples/query_stack_example.py | 98 ++++ docs/examples/query_stack_example.rst | 22 + docs/guides/README.md | 6 + docs/guides/adapters/adbc.md | 6 + docs/guides/adapters/aiosqlite.md | 5 + docs/guides/adapters/asyncmy.md | 9 + docs/guides/adapters/asyncpg.md | 25 + docs/guides/adapters/bigquery.md | 6 + docs/guides/adapters/duckdb.md | 31 +- docs/guides/adapters/oracledb.md | 16 + docs/guides/adapters/psqlpy.md | 9 + docs/guides/adapters/psycopg.md | 24 + docs/guides/adapters/sqlite.md | 23 +- docs/guides/architecture/patterns.md | 46 ++ docs/guides/performance/batch-execution.md | 50 ++ docs/reference/index.rst | 7 + docs/reference/query-stack.rst | 73 +++ docs/usage/index.rst | 3 + specs/guides/query-stack.md | 67 +++ sqlspec/adapters/asyncpg/__init__.py | 3 +- sqlspec/adapters/asyncpg/_types.py | 8 +- sqlspec/adapters/asyncpg/config.py | 3 +- sqlspec/adapters/asyncpg/driver.py | 154 ++++++- sqlspec/adapters/oracledb/driver.py | 432 +++++++++++++++++- sqlspec/adapters/psycopg/driver.py | 313 ++++++++++++- sqlspec/config.py | 6 +- sqlspec/core/__init__.py | 19 +- sqlspec/core/metrics.py | 83 ++++ sqlspec/core/result.py | 9 +- sqlspec/core/stack.py | 3 +- sqlspec/driver/__init__.py | 10 +- sqlspec/driver/_async.py | 113 +++-- sqlspec/driver/_common.py | 119 ++++- sqlspec/driver/_sync.py | 114 ++--- sqlspec/exceptions.py | 10 +- sqlspec/protocols.py | 5 +- .../test_adbc/test_adbc_driver.py | 48 +- .../test_aiosqlite/test_driver.py | 50 +- .../test_adapters/test_asyncmy/test_driver.py | 49 +- .../test_adapters/test_asyncpg/test_driver.py | 67 ++- .../test_bigquery/test_driver.py | 46 +- .../test_adapters/test_duckdb/test_driver.py | 48 +- .../test_adapters/test_oracledb/test_stack.py | 144 ++++++ .../test_adapters/test_psqlpy/test_driver.py | 47 +- .../test_psycopg/test_async_copy.py | 54 ++- .../test_adapters/test_psycopg/test_driver.py | 58 ++- .../test_adapters/test_sqlite/test_driver.py | 50 +- tests/integration/test_stack_edge_cases.py | 183 ++++++++ .../test_oracledb/test_pipeline_helpers.py | 121 +++++ tests/unit/test_config_resolver.py | 2 +- tests/unit/test_core/test_stack.py | 25 +- tests/unit/test_core/test_stack_metrics.py | 42 ++ tools/run_pre_commit.py | 57 +++ 58 files changed, 2852 insertions(+), 201 deletions(-) create mode 100644 docs/examples/query_stack_example.py create mode 100644 docs/examples/query_stack_example.rst create mode 100644 docs/guides/architecture/patterns.md create mode 100644 docs/guides/performance/batch-execution.md create mode 100644 docs/reference/query-stack.rst create mode 100644 specs/guides/query-stack.md create mode 100644 sqlspec/core/metrics.py create mode 100644 tests/integration/test_adapters/test_oracledb/test_stack.py create mode 100644 tests/integration/test_stack_edge_cases.py create mode 100644 tests/unit/test_adapters/test_oracledb/test_pipeline_helpers.py create mode 100644 tests/unit/test_core/test_stack_metrics.py create mode 100755 tools/run_pre_commit.py diff --git a/.gitignore b/.gitignore index 9b373ffa9..1852c137c 100644 --- a/.gitignore +++ b/.gitignore @@ -19,6 +19,7 @@ target/ .vscode/ .cursor/ .zed/ +.cache .coverage* # files **/*.so diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f8c8bf078..bc91f6d61 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -43,6 +43,7 @@ repos: rev: "v1.0.1" hooks: - id: sphinx-lint + args: ["--jobs", "1"] - repo: local hooks: - 
id: pypi-readme
diff --git a/AGENTS.md b/AGENTS.md
index 1134fa175..acfd26a06 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -185,6 +185,25 @@ SQLSpec is a type-safe SQL query mapper designed for minimal abstraction between
 - **Single-Pass Processing**: Parse once → transform once → validate once - SQL object is single source of truth
 - **Abstract Methods with Concrete Implementations**: Protocol defines abstract methods, base classes provide concrete sync/async implementations
 
+### Query Stack Implementation Guidelines
+
+- **Builder Discipline**
+    - `StatementStack` and `StackOperation` are immutable (`__slots__`, tuple storage). Every push helper returns a new stack; never mutate `_operations` in place.
+    - Validate inputs at push time (non-empty SQL, execute_many payloads, reject nested stacks) so drivers can assume well-formed operations.
+- **Adapter Responsibilities**
+    - Add a single capability gate per adapter (e.g., Oracle pipeline version check, `psycopg.capabilities.has_pipeline()`), and return `super().execute_stack()` immediately when unsupported.
+    - Preserve `StackResult.raw_result` by building SQL/Arrow results via `create_sql_result()` / `create_arrow_result()` instead of copying row data.
+    - Honor manual toggles via `driver_features={"stack_native_disabled": True}` and document the behavior in the adapter guide.
+- **Telemetry + Tracing**
+    - Always wrap adapter overrides with `StackExecutionObserver(self, stack, continue_on_error, native_pipeline=bool)`.
+    - Do **not** emit duplicate metrics; the observer already increments `stack.execute.*`, logs `stack.execute.start/complete/failed`, and publishes the `sqlspec.stack.execute` span.
+- **Error Handling**
+    - Wrap driver exceptions in `StackExecutionError` with `operation_index`, summarized SQL (`describe_stack_statement()`), adapter name, and execution mode.
+    - Continue-on-error stacks append `StackResult.from_error()` and keep executing. Fail-fast stacks roll back (if they started the transaction) before re-raising the wrapped error.
+- **Testing Expectations**
+    - Add integration tests under `tests/integration/test_adapters/<adapter>/test_driver.py::test_*statement_stack*` that cover the native path, sequential fallback, and continue-on-error.
+    - Guard base behavior (empty stacks, large stacks, transaction boundaries) via `tests/integration/test_stack_edge_cases.py`.
+
 ### Driver Parameter Profile Registry
 
 - All adapter parameter defaults live in `DriverParameterProfile` entries inside `sqlspec/core/parameters.py`.
diff --git a/docs/changelog.rst b/docs/changelog.rst
index 9a9a9cfbc..308a8b124 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -10,6 +10,14 @@ SQLSpec Changelog
 Recent Updates
 ==============
 
+Query Stack Documentation Suite
+-------------------------------
+
+- Expanded the :doc:`/reference/query-stack` API reference (``StatementStack``, ``StackResult``, driver hooks, and ``StackExecutionError``) with the high-level workflow, execution modes, telemetry, and troubleshooting tips.
+- Added :doc:`/examples/query_stack_example` that runs the same stack against SQLite and AioSQLite.
+- Captured the detailed architecture and performance guidance inside the internal specs workspace for future agent runs.
+- Updated every adapter reference with a **Query Stack Support** section so behavior is documented per database.
+ Migration Convenience Methods on Config Classes ------------------------------------------------ diff --git a/docs/examples/index.rst b/docs/examples/index.rst index 121af0bdb..01aa9a3c4 100644 --- a/docs/examples/index.rst +++ b/docs/examples/index.rst @@ -91,6 +91,8 @@ Patterns - Routing requests to dedicated SQLite configs per tenant slug. * - ``patterns/configs/multi_adapter_registry.py`` - Register multiple adapters on a single SQLSpec registry. + * - ``query_stack_example.py`` + - Immutable StatementStack workflow executed against SQLite and AioSQLite drivers. Loaders ------- @@ -142,4 +144,5 @@ Shared Utilities frameworks/starlette/aiosqlite_app frameworks/flask/sqlite_app patterns/configs/multi_adapter_registry + query_stack_example README diff --git a/docs/examples/query_stack_example.py b/docs/examples/query_stack_example.py new file mode 100644 index 000000000..b4b9b7906 --- /dev/null +++ b/docs/examples/query_stack_example.py @@ -0,0 +1,98 @@ +import asyncio + +from sqlspec import SQLSpec +from sqlspec.adapters.aiosqlite import AiosqliteConfig +from sqlspec.adapters.sqlite import SqliteConfig +from sqlspec.core import StatementStack + + +def build_stack(user_id: int, action: str) -> "StatementStack": + stack = ( + StatementStack() + .push_execute( + "INSERT INTO audit_log (user_id, action) VALUES (:user_id, :action)", {"user_id": user_id, "action": action} + ) + .push_execute( + "UPDATE users SET last_action = :action WHERE id = :user_id", {"action": action, "user_id": user_id} + ) + .push_execute("SELECT role FROM user_roles WHERE user_id = :user_id ORDER BY role", {"user_id": user_id}) + ) + return stack + + +def run_sync_example() -> None: + sql = SQLSpec() + config = SqliteConfig(pool_config={"database": ":memory:"}) + registry = sql.add_config(config) + + with sql.provide_session(registry) as session: + session.execute_script( + """ + CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, last_action TEXT); + CREATE TABLE IF NOT EXISTS audit_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + action TEXT NOT NULL + ); + CREATE TABLE IF NOT EXISTS user_roles ( + user_id INTEGER NOT NULL, + role TEXT NOT NULL + ); + INSERT INTO users (id, last_action) VALUES (1, 'start'); + INSERT INTO user_roles (user_id, role) VALUES (1, 'admin'), (1, 'editor'); + """ + ) + + stack = build_stack(user_id=1, action="sync-login") + results = session.execute_stack(stack) + + audit_insert, user_update, role_select = results + print("[sync] rows inserted:", audit_insert.rowcount) + print("[sync] rows updated:", user_update.rowcount) + if role_select.raw_result is not None: + print("[sync] roles:", [row["role"] for row in role_select.raw_result.data]) + + +def run_async_example() -> None: + async def _inner() -> None: + sql = SQLSpec() + config = AiosqliteConfig(pool_config={"database": ":memory:"}) + registry = sql.add_config(config) + + async with sql.provide_session(registry) as session: + await session.execute_script( + """ + CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, last_action TEXT); + CREATE TABLE IF NOT EXISTS audit_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + action TEXT NOT NULL + ); + CREATE TABLE IF NOT EXISTS user_roles ( + user_id INTEGER NOT NULL, + role TEXT NOT NULL + ); + INSERT INTO users (id, last_action) VALUES (2, 'start'); + INSERT INTO user_roles (user_id, role) VALUES (2, 'viewer'); + """ + ) + + stack = build_stack(user_id=2, action="async-login") + results = await session.execute_stack(stack, 
continue_on_error=False) + + audit_insert, user_update, role_select = results + print("[async] rows inserted:", audit_insert.rowcount) + print("[async] rows updated:", user_update.rowcount) + if role_select.raw_result is not None: + print("[async] roles:", [row["role"] for row in role_select.raw_result.data]) + + asyncio.run(_inner()) + + +def main() -> None: + run_sync_example() + run_async_example() + + +if __name__ == "__main__": + main() diff --git a/docs/examples/query_stack_example.rst b/docs/examples/query_stack_example.rst new file mode 100644 index 000000000..8fce8468d --- /dev/null +++ b/docs/examples/query_stack_example.rst @@ -0,0 +1,22 @@ +==================== +Query Stack Example +==================== + +This example builds an immutable ``StatementStack`` and executes it against both the synchronous SQLite adapter and the asynchronous AioSQLite adapter. Each stack: + +1. Inserts an audit log row +2. Updates the user's last action +3. Fetches the user's roles + +.. literalinclude:: query_stack_example.py + :language: python + :caption: ``docs/examples/query_stack_example.py`` + :linenos: + +Run the script: + +.. code-block:: console + + uv run python docs/examples/query_stack_example.py + +Expected output shows inserted/updated row counts plus the projected role list for each adapter. diff --git a/docs/guides/README.md b/docs/guides/README.md index 7540b0b0c..fa06dc5ec 100644 --- a/docs/guides/README.md +++ b/docs/guides/README.md @@ -36,6 +36,11 @@ Optimization guides for SQLSpec: - [**SQLglot Guide**](performance/sqlglot.md) - SQL parsing, transformation, and optimization with SQLglot - [**MyPyC Guide**](performance/mypyc.md) - Compilation strategies for high-performance Python code +- [**Batch Execution**](performance/batch-execution.md) - Guidance for Query Stack vs. ``execute_many`` across adapters + +## Features + +- [**Query Stack Guide**](features/query-stack.md) - Multi-statement execution, execution modes, telemetry, and troubleshooting ## Migrations @@ -55,6 +60,7 @@ Core architecture and design patterns: - [**Architecture Guide**](architecture/architecture.md) - SQLSpec architecture overview - [**Data Flow Guide**](architecture/data-flow.md) - How data flows through SQLSpec +- [**Architecture Patterns**](architecture/patterns.md) - Immutable stack builder, native vs. sequential branching, and telemetry requirements ## Extensions diff --git a/docs/guides/adapters/adbc.md b/docs/guides/adapters/adbc.md index 55e3ed5e8..e578713ff 100644 --- a/docs/guides/adapters/adbc.md +++ b/docs/guides/adapters/adbc.md @@ -17,6 +17,12 @@ This guide provides specific instructions for the `adbc` adapter. - **JSON Strategy:** `helper` (shared serializers wrap dict/list/tuple values) - **Extras:** `type_coercion_overrides` ensure Arrow arrays map to Python lists; PostgreSQL dialects attach a NULL-handling AST transformer +## Query Stack Support + +- Each ADBC backend falls back to SQLSpec's sequential stack executor. There is no driver-agnostic pipeline API today, so stacks simply reuse the same cursor management that individual `execute()` calls use, wrapped in a transaction when the backend supports it (e.g., PostgreSQL) and as independent statements when it does not (e.g., SQLite, DuckDB). +- Continue-on-error mode is supported on every backend. Successful statements commit as they finish, while failures populate `StackResult.error` for downstream inspection. 
+- Telemetry spans (`sqlspec.stack.execute`) and `StackExecutionMetrics` counters emit for all stacks, enabling observability parity with adapters that do have native optimizations. + ## Best Practices - **Arrow-Native:** The primary benefit of ADBC is its direct integration with Apache Arrow. Use it when you need to move large amounts of data efficiently between the database and data science tools like Pandas or Polars. diff --git a/docs/guides/adapters/aiosqlite.md b/docs/guides/adapters/aiosqlite.md index 6643e8227..d6bc0c42f 100644 --- a/docs/guides/adapters/aiosqlite.md +++ b/docs/guides/adapters/aiosqlite.md @@ -17,6 +17,11 @@ This guide provides specific instructions for the `aiosqlite` adapter. - **JSON Strategy:** `helper` (shared serializer handles dict/list/tuple inputs) - **Extras:** None (profile applies bool→int and ISO datetime coercions automatically) +## Query Stack Support + +- `StatementStack` executions always use the sequential fallback – SQLite has no notion of pipelined requests – so each operation runs one after another on the same connection. When `continue_on_error=False`, SQLSpec opens a transaction (if one is not already in progress) so the entire stack commits or rolls back together. With `continue_on_error=True`, statements are committed individually after each success. +- Because pooled in-memory connections share state, prefer per-test temporary database files when running stacks under pytest-xdist (see `tests/integration/test_adapters/test_aiosqlite/test_driver.py::test_aiosqlite_statement_stack_*` for the reference pattern). + ## Best Practices - **Async Only:** This is an asynchronous driver for SQLite. Use it in `asyncio` applications. diff --git a/docs/guides/adapters/asyncmy.md b/docs/guides/adapters/asyncmy.md index 1a98861f9..0d8cbf7cf 100644 --- a/docs/guides/adapters/asyncmy.md +++ b/docs/guides/adapters/asyncmy.md @@ -26,3 +26,12 @@ This guide covers `asyncmy`. - **`PyMySQL.err.OperationalError: (1366, ...)`**: Incorrect string value for a column. This is often due to character set issues. Ensure your connection and tables are using `utf8mb4`. - **Authentication Errors:** MySQL 8.0 and later use a different default authentication plugin (`caching_sha2_password`). If you have trouble connecting, you may need to configure the user account to use the older `mysql_native_password` plugin, though this is less secure. + +## Query Stack Support + +The MySQL wire protocol doesn't offer a pipeline/batch mode like Oracle or PostgreSQL, so `StatementStack` executions use the base sequential implementation: + +- All operations run one-by-one within the usual transaction rules (fail-fast stacks open a transaction, continue-on-error stacks stay in autocommit mode). +- Telemetry spans/metrics/logs are still emitted so you can trace stack executions in production. + +If you need reduced round-trips for MySQL/MariaDB, consider consolidating statements into stored procedures or batching logic within application-side transactions. diff --git a/docs/guides/adapters/asyncpg.md b/docs/guides/adapters/asyncpg.md index 791bce4c2..61aa66899 100644 --- a/docs/guides/adapters/asyncpg.md +++ b/docs/guides/adapters/asyncpg.md @@ -134,6 +134,31 @@ pip install cloud-alloydb-python-connector For comprehensive configuration options and troubleshooting, see the [Google Cloud Connectors Guide](/guides/cloud/google-connectors.md). 
+## Query Stack Support + +`StatementStack` calls execute in a single transaction when `continue_on_error=False`, leveraging asyncpg's fast extended-query protocol to minimize round-trips. When you need partial success handling (`continue_on_error=True`), the adapter automatically disables the shared transaction and reports individual failures via `StackResult.error`. + +- Telemetry spans (`sqlspec.stack.execute`), metrics (`stack.execute.*`), and hashed operation logging are emitted for every stack, so production monitoring captures adoption automatically. +- The pipeline path preserves `StackResult.raw_result` for SELECT statements, so downstream helpers continue to operate on the original `SQLResult` objects. +- To force the sequential fallback (for incident response or regression tests), pass `driver_features={"stack_native_disabled": True}` to the config. + +Example usage: + +```python +from sqlspec.core import StatementStack + +stack = ( + StatementStack() + .push_execute("INSERT INTO audit_log (message) VALUES ($1)", ("login",)) + .push_execute("UPDATE users SET last_login = NOW() WHERE id = $1", (user_id,)) + .push_execute("SELECT permissions FROM user_permissions WHERE user_id = $1", (user_id,)) +) + +results = await asyncpg_session.execute_stack(stack) +``` + +If you enable `continue_on_error=True`, the adapter returns three `StackResult` objects, each recording its own `error`/`warning` state without rolling the entire stack back. + ## MERGE Operations (PostgreSQL 15+) AsyncPG supports high-performance MERGE operations for bulk upserts using PostgreSQL's native MERGE statement with `jsonb_to_recordset()`. diff --git a/docs/guides/adapters/bigquery.md b/docs/guides/adapters/bigquery.md index c9d53e2be..70afcaa36 100644 --- a/docs/guides/adapters/bigquery.md +++ b/docs/guides/adapters/bigquery.md @@ -17,6 +17,12 @@ This guide provides specific instructions for the `bigquery` adapter. - **JSON Strategy:** `helper` with `json_tuple_strategy="tuple"` - **Extras:** `type_coercion_overrides` keep list values intact while converting tuples to lists during binding +## Query Stack Support + +- BigQuery does **not** expose a native pipeline API, so `StatementStack` calls execute sequentially through the core driver. Because BigQuery does not offer transactional semantics, the `begin()`/`commit()` hooks are no-ops—the stack still runs each statement in order and surfaces failures via `StackResult.error`. +- Continue-on-error mode is supported. Each failing operation records its own `StackExecutionError` while later statements continue to run, which is particularly helpful for long-running analytical batches. +- Telemetry spans (`sqlspec.stack.execute`) and `StackExecutionMetrics` counters are emitted for every stack execution, making it easy to monitor adoption even though the adapter falls back to the sequential path. + ## Best Practices - **Authentication:** BigQuery requires authentication with Google Cloud. For local development, the easiest way is to use the Google Cloud CLI and run `gcloud auth application-default login`. diff --git a/docs/guides/adapters/duckdb.md b/docs/guides/adapters/duckdb.md index 884364899..53baf545e 100644 --- a/docs/guides/adapters/duckdb.md +++ b/docs/guides/adapters/duckdb.md @@ -8,20 +8,27 @@ This guide provides specific instructions for the `duckdb` adapter. 
## Key Information -- **Driver:** `duckdb` -- **Parameter Style:** `qmark` (e.g., `?`) +- **Driver:** `duckdb` +- **Parameter Style:** `qmark` (e.g., `?`) ## Parameter Profile -- **Registry Key:** `"duckdb"` -- **JSON Strategy:** `helper` (shared serializer covers dict/list/tuple) -- **Extras:** None (profile preserves existing `allow_mixed_parameter_styles=False`) +- **Registry Key:** `"duckdb"` +- **JSON Strategy:** `helper` (shared serializer covers dict/list/tuple) +- **Extras:** None (profile preserves existing `allow_mixed_parameter_styles=False`) + +## Query Stack Support + +- DuckDB does **not** expose a native multi-statement pipeline, so `StatementStack` always executes through the base sequential path. Transactions are created automatically when `continue_on_error=False`, matching the behavior of standalone `execute()` calls. +- SQLSpec still emits `stack.native_pipeline.skip` DEBUG logs and `stack.execute.path.sequential` metrics so operators can confirm the adapter is intentionally running in fallback mode. +- `continue_on_error=True` is supported: each failing statement records a `StackExecutionError` while later statements keep running, which is helpful when running analytical maintenance batches inside DuckDB. +- `tests/integration/test_adapters/test_duckdb/test_driver.py::test_duckdb_statement_stack_*` exercises the sequential + continue-on-error paths to guard against regressions. ## Best Practices -- **In-Memory vs. File:** DuckDB can run entirely in-memory (`:memory:`) or with a file-based database. In-memory is great for fast, temporary analytics. File-based is for persistence. -- **Extensions:** DuckDB has a rich ecosystem of extensions (e.g., for reading Parquet files, JSON, etc.). These can be loaded via the `sqlspec` configuration. -- **Vectorized Execution:** DuckDB is extremely fast for analytical queries due to its vectorized execution engine. Write queries that operate on columns rather than row-by-row. +- **In-Memory vs. File:** DuckDB can run entirely in-memory (`:memory:`) or with a file-based database. In-memory is great for fast, temporary analytics. File-based is for persistence. +- **Extensions:** DuckDB has a rich ecosystem of extensions (e.g., for reading Parquet files, JSON, etc.). These can be loaded via the `sqlspec` configuration. +- **Vectorized Execution:** DuckDB is extremely fast for analytical queries due to its vectorized execution engine. Write queries that operate on columns rather than row-by-row. ## Arrow Support (Native) @@ -53,12 +60,14 @@ with sql.provide_session() as session: ### Performance Characteristics **Native Arrow Benefits**: + - **Columnar-native format** - DuckDB already uses columnar storage - **Zero-copy data transfer** - direct Arrow output - **Optimal for analytics** - Perfect for OLAP workloads - **Parquet integration** - Seamless Arrow ↔ Parquet conversion **Best for**: + - Analytical queries on large datasets - Reading from Parquet, CSV, or JSON files - In-memory data transformations @@ -283,6 +292,6 @@ ds.write_dataset( ## Common Issues -- **`duckdb.IOException`**: Usually occurs when there are issues reading a file (e.g., a Parquet or CSV file). Check file paths and permissions. -- **Memory Management:** While fast, DuckDB can be memory-intensive. For large datasets, monitor memory usage and consider using a file-based database to allow for out-of-core processing. 
-- **`MissingDependencyError: pyarrow`**: Install Arrow support with `pip install sqlspec[arrow]` +- **`duckdb.IOException`**: Usually occurs when there are issues reading a file (e.g., a Parquet or CSV file). Check file paths and permissions. +- **Memory Management:** While fast, DuckDB can be memory-intensive. For large datasets, monitor memory usage and consider using a file-based database to allow for out-of-core processing. +- **`MissingDependencyError: pyarrow`**: Install Arrow support with `pip install sqlspec[arrow]` diff --git a/docs/guides/adapters/oracledb.md b/docs/guides/adapters/oracledb.md index 3217c8279..a8ab077d4 100644 --- a/docs/guides/adapters/oracledb.md +++ b/docs/guides/adapters/oracledb.md @@ -17,6 +17,22 @@ This guide provides specific instructions and best practices for working with th - **JSON Strategy:** `helper` (shared JSON serializer applied through the profile) - **Extras:** None (uses defaults with native list expansion disabled) +## Query Stack Support + +`StatementStack` executions automatically use python-oracledb's native pipeline APIs when the adapter detects a compatible runtime (Oracle Database 23ai+ and python-oracledb ≥ 2.4.0). The pipeline path batches every operation in a stack into a single round-trip while preserving the regular `StackResult.raw_result` semantics, so downstream helpers like `get_data()` or `rowcount` continue to work without code changes. + +### Requirements + +- Oracle Database 23ai or newer (`SELECT version FROM v$instance`) +- python-oracledb 2.4.0 or newer (thin **or** thick mode) +- Stacks that only contain `push_execute`/`push_execute_many` operations. `push_execute_arrow` and `push_execute_script` fall back to sequential execution automatically. + +### Telemetry and Overrides + +- Every stack execution emits `StackExecutionMetrics` counters (e.g., `stack.execute.invocations`, `stack.execute.path.native`, `stack.execute.partial_errors`) and a `sqlspec.stack.execute` tracing span whenever `ObservabilityRuntime` is enabled. These metrics include tags for the adapter, fail-fast vs. continue-on-error mode, native vs. sequential path, and the forced-disable flag so operators can chart adoption and error rates. +- When the pipeline is disabled because of driver/database version constraints, the adapter logs `stack.native_pipeline.skip` at `DEBUG` with reason codes such as `driver_version`, `database_version`, or `driver_api_missing` to make diagnosis straightforward. +- `driver_features={"stack_native_disabled": True}` forces sequential execution if you need to bypass the pipeline temporarily. + ## Thick vs. Thin Client The `oracledb` driver supports two modes: diff --git a/docs/guides/adapters/psqlpy.md b/docs/guides/adapters/psqlpy.md index b6965c016..95e536de6 100644 --- a/docs/guides/adapters/psqlpy.md +++ b/docs/guides/adapters/psqlpy.md @@ -163,6 +163,15 @@ For comparison: - **oracledb**: Has `_numpy_handlers.py` with `register_numpy_handlers()` - **psqlpy**: **No type handlers file** - all handled in Rust +## Query Stack Support + +`psqlpy` does **not** expose a pipeline or batch API beyond the standard execute/execute_many entry points, so SQLSpec intentionally keeps the base sequential stack implementation: + +- `execute_stack()` simply iterates operations using the shared transaction semantics from the driver base. +- Telemetry/logging still fire for observability, so stack executions remain traceable even without a performance boost. 
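+
+As a sketch of what the fallback looks like in practice (the `psqlpy_session` name and the table schema below are illustrative assumptions, not adapter APIs):
+
+```python
+from sqlspec.core import StatementStack
+
+stack = (
+    StatementStack()
+    .push_execute("INSERT INTO audit_log (message) VALUES ($1)", ("login",))
+    .push_execute("SELECT permissions FROM user_permissions WHERE user_id = $1", (1,))
+)
+
+# Fail-fast mode runs both statements sequentially inside one transaction.
+results = await psqlpy_session.execute_stack(stack)
+```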
+ +If you need reduced round-trips on PostgreSQL, prefer the `asyncpg` or `psycopg` adapters, both of which provide native stack overrides. + ## MERGE Operations (PostgreSQL 15+) Psqlpy supports MERGE operations for bulk upserts using PostgreSQL's native MERGE statement with `jsonb_to_recordset()`. diff --git a/docs/guides/adapters/psycopg.md b/docs/guides/adapters/psycopg.md index 4c5ca67c0..abb1b700f 100644 --- a/docs/guides/adapters/psycopg.md +++ b/docs/guides/adapters/psycopg.md @@ -27,6 +27,30 @@ The `psycopg` adapter supports the following driver features: - `enable_pgvector`: A boolean to enable or disable `pgvector` support. Defaults to `True` if `pgvector` is installed. +## Query Stack Support + +Psycopg 3 exposes libpq pipeline mode, and SQLSpec uses it automatically for `StatementStack` calls: + +- When `psycopg.capabilities.has_pipeline()` reports support (libpq 14+), `execute_stack()` wraps all operations in `with conn.pipeline():` to reduce round-trips. +- Fail-fast stacks (`continue_on_error=False`) run inside a single transaction; continue-on-error stacks run in autocommit mode so later statements can proceed even if earlier ones fail. +- All executions emit the standard stack telemetry metrics/spans/logs so you can observe adoption in production. +- Pass `driver_features={"stack_native_disabled": True}` if you need to disable pipeline mode temporarily (the adapter will fall back to the sequential base implementation). + +Example: + +```python +stack = ( + StatementStack() + .push_execute("INSERT INTO events (name, payload) VALUES (%s, %s)", ("login", payload)) + .push_execute("UPDATE users SET last_login = NOW() WHERE id = %s", (user_id,)) + .push_execute("SELECT permissions FROM user_permissions WHERE user_id = %s", (user_id,)) +) + +results = psycopg_session.execute_stack(stack) +``` + +When a statement fails and `continue_on_error=True`, its corresponding `StackResult` sets `error` while the other operations still run within the same pipeline block. + ## MERGE Operations (PostgreSQL 15+) Psycopg supports MERGE operations for bulk upserts using PostgreSQL's native MERGE statement with `jsonb_to_recordset()`. diff --git a/docs/guides/adapters/sqlite.md b/docs/guides/adapters/sqlite.md index 6d67461c7..a20d94f02 100644 --- a/docs/guides/adapters/sqlite.md +++ b/docs/guides/adapters/sqlite.md @@ -8,21 +8,26 @@ This guide covers `sqlite3` (sync) and `aiosqlite` (async). ## Key Information -- **Driver:** `sqlite3` (built-in), `aiosqlite` -- **Parameter Style:** `qmark` (e.g., `?`) +- **Driver:** `sqlite3` (built-in), `aiosqlite` +- **Parameter Style:** `qmark` (e.g., `?`) ## Parameter Profile -- **Registry Keys:** `"sqlite"` (sync), `"aiosqlite"` (async) -- **JSON Strategy:** `helper` for both drivers (shared serializer handles dict/list/tuple parameters) -- **Extras:** None (profiles apply ISO formatting for datetime/date and convert Decimal to string) +- **Registry Keys:** `"sqlite"` (sync), `"aiosqlite"` (async) +- **JSON Strategy:** `helper` for both drivers (shared serializer handles dict/list/tuple parameters) +- **Extras:** None (profiles apply ISO formatting for datetime/date and convert Decimal to string) + +## Query Stack Support + +- Neither `sqlite3` nor `aiosqlite` exposes a native batching primitive, so `StatementStack` reuses the base sequential executor. 
When `continue_on_error=False`, SQLSpec opens a transaction (if one is not already active) so the full stack succeeds or fails atomically; when `continue_on_error=True`, each statement commits immediately to match SQLite’s autocommit semantics. +- Integration coverage lives in `tests/integration/test_adapters/test_sqlite/test_driver.py::test_sqlite_statement_stack_*` and `tests/integration/test_adapters/test_aiosqlite/test_driver.py::test_aiosqlite_statement_stack_*`, ensuring both sync and async flows preserve `StackResult.raw_result` and surface per-statement errors. ## Best Practices -- **Use Cases:** Ideal for testing, local development, and embedded applications. Not suitable for high-concurrency production workloads. -- **In-Memory Databases:** For tests, use `:memory:` for the database name to create a fast, temporary database. -- **Foreign Keys:** Remember to enable foreign key support with `PRAGMA foreign_keys = ON;` if you need it, as it's off by default. +- **Use Cases:** Ideal for testing, local development, and embedded applications. Not suitable for high-concurrency production workloads. +- **In-Memory Databases:** For tests, use `:memory:` for the database name to create a fast, temporary database. +- **Foreign Keys:** Remember to enable foreign key support with `PRAGMA foreign_keys = ON;` if you need it, as it's off by default. ## Common Issues -- **`sqlite3.OperationalError: database is locked`**: This occurs when multiple threads/processes try to write to the same database file simultaneously. For testing, use separate database files or in-memory databases for each test process. +- **`sqlite3.OperationalError: database is locked`**: This occurs when multiple threads/processes try to write to the same database file simultaneously. For testing, use separate database files or in-memory databases for each test process. diff --git a/docs/guides/architecture/patterns.md b/docs/guides/architecture/patterns.md new file mode 100644 index 000000000..b4d022edf --- /dev/null +++ b/docs/guides/architecture/patterns.md @@ -0,0 +1,46 @@ +# Architecture Patterns + +This guide captures the key patterns introduced by Query Stack. Use it as the canonical reference when extending the feature or reviewing adapter contributions. + +## Immutable Stack Builder + +- ``StatementStack`` stores operations as tuples (method, statement, args, kwargs). Every mutating helper returns a **new** instance. +- Avoid dataclasses—``__slots__`` keeps the builder MyPy-friendly and mypyc-compatible. +- Share stacks freely across tasks/programs. There is no internal mutation after construction. +- Validation happens at push time (empty SQL, invalid execute_many payloads, nested stacks). Drivers can assume well-formed operations. + +## Native vs Sequential Branching + +- Base drivers (sync + async) handle sequential execution, transaction management, continue-on-error commits, and rollback safety. +- Adapter overrides should be thin wrappers that: + 1. Decide whether a native pipeline is available (version checks, capability flags). + 2. Fall back to ``super().execute_stack()`` immediately when native mode is unavailable. + 3. Convert native driver results back into ``StackResult`` without copying data. +- Keep capability gating deterministic—one probe function per adapter (e.g., Oracle’s pipeline version check, psycopg’s ``has_pipeline`` flag). + +## StackExecutionObserver Contract + +- Always wrap adapter-specific overrides with ``StackExecutionObserver`` using the correct ``native_pipeline`` flag. 
+- The observer emits: + - ``stack.execute.*`` metrics (invocations, statements, duration, partial errors, forced overrides) + - ``sqlspec.stack.execute`` tracing spans with hashed SQL identifiers + - Structured DEBUG/ERROR logs +- Adapters should **not** emit their own stack metrics; they only pass the correct context (continue_on_error, native pipeline flag). + +## Error Handling Pattern + +- Wrap driver exceptions in ``StackExecutionError`` with: + - ``operation_index`` + - ``sql`` summary (`describe_stack_statement`) + - ``adapter`` name + - ``mode`` (``fail-fast`` or ``continue-on-error``) +- Continue-on-error flows append ``StackResult.from_error(error)`` and keep executing. +- Fail-fast flows immediately raise the wrapped error after rolling back / cleaning state. + +## Adapter Checklist + +1. **Version / capability gate** native execution. +2. **Respect ``stack_native_disabled`` driver feature** if provided manually (useful for integration tests). +3. **Never mutate stack operations**—always compile to driver-specific statements first. +4. **Preserve ``StackResult.raw_result``** when possible (call ``StackResult.from_sql_result`` / ``from_arrow_result``). +5. **Guarantee cleanup** (`commit()`/`rollback()` in `finally` blocks) even for native pipelines. diff --git a/docs/guides/performance/batch-execution.md b/docs/guides/performance/batch-execution.md new file mode 100644 index 000000000..d3b9f1c3c --- /dev/null +++ b/docs/guides/performance/batch-execution.md @@ -0,0 +1,50 @@ +# Batch Execution Strategies + +Query Stack complements (not replaces) ``execute_many``. Use this guide to choose the right batching strategy per workload. + +## Query Stack vs ``execute_many`` + +| Scenario | Use Query Stack | Use ``execute_many`` | +| --- | --- | --- | +| Heterogeneous statements (audit INSERT + UPDATE + SELECT) | ✅ | ❌ | +| Single statement + many parameter sets | ❌ | ✅ | +| Need per-statement telemetry and error attribution | ✅ | ❌ | +| Simple bulk insert without control flow | ❌ | ✅ | + +## Adapter Optimizations + +- **Oracle 23ai+** – Uses ``oracledb.create_pipeline()`` / ``run_pipeline()`` for true single round-trips. +- **Psycopg 3 (libpq 14+)** – Uses pipeline mode to enqueue statements without waiting for results. +- **AsyncPG** – Reuses libpq’s extended protocol and caches prepared statements for repeated stacks. +- **Fallback Adapters** – Execute sequentially but still gain transactional bundling and telemetry. + +## Measuring Benefits + +1. Run workloads with `StackExecutionMetrics` enabled (the default) and export `stack.execute.*` counters. +2. Compare average duration in milliseconds between native vs sequential paths. +3. Use tracing spans to verify pipeline usage—``sqlspec.stack.native_pipeline=true`` indicates the optimized path. +4. Set up canaries with `driver_features={"stack_native_disabled": True}` if you need to toggle native mode manually during incident response. + +## Tuning Recommendations + +- **Group dependent statements**: keep related DML/SELECT blocks inside one stack to avoid extra round-trips. +- **Limit stack size**: avoid 100+ statement stacks on fallback adapters—split into logical phases so rollbacks stay manageable. +- **Watch transactions**: fail-fast stacks run inside a transaction when the driver is not already in one. Continue-on-error stacks auto-commit after each success. 
+- **Mix Arrow with SQL sparingly**: ``push_execute_arrow`` is available, but only include Arrow operations when the adapter supports it, or the driver will raise `StackExecutionError`. + +## Benchmark Template + +Use the following structure when adding performance tests (see Task 6.6): + +```python +from sqlspec.core import StatementStack + +stack = ( + StatementStack() + .push_execute("INSERT INTO audit_log (action) VALUES (:action)", {"action": "login"}) + .push_execute("UPDATE users SET last_login = CURRENT_TIMESTAMP WHERE id = :id", {"id": 1}) + .push_execute("SELECT permissions FROM user_permissions WHERE user_id = :id", {"id": 1}) +) +``` + +Measure wall-clock time for native vs sequential execution, record round-trip counts (database logs or tracing), and publish the findings in ``docs/benchmarks/`` when Task 6.6 is complete. diff --git a/docs/reference/index.rst b/docs/reference/index.rst index f2dc0ef18..456712d97 100644 --- a/docs/reference/index.rst +++ b/docs/reference/index.rst @@ -53,6 +53,12 @@ Quick Navigation Fluent API for building SQL queries programmatically with method chaining. + .. grid-item-card:: Query Stack + :link: query-stack + :link-type: doc + + Immutable multi-statement execution with native pipelines, sequential fallbacks, and stack-aware telemetry. + .. grid-item-card:: Core Components :link: core :link-type: doc @@ -115,6 +121,7 @@ Available API References base adapters builder + query-stack core driver extensions diff --git a/docs/reference/query-stack.rst b/docs/reference/query-stack.rst new file mode 100644 index 000000000..03fb0c64c --- /dev/null +++ b/docs/reference/query-stack.rst @@ -0,0 +1,73 @@ +============= +Query Stack +============= + +The Query Stack APIs let you compose multiple SQL operations into an immutable ``StatementStack`` and execute them in a single driver call. Each operation preserves the underlying ``SQLResult``/``ArrowResult`` so downstream helpers continue to work without copying data. + +.. contents:: Table of Contents + :local: + :depth: 2 + +Overview +======== + +The stack system is composed of: + +- ``StatementStack`` – immutable builder with push helpers for execute/execute_many/execute_script/execute_arrow +- ``StackOperation`` – the tuple-like value object stored inside the stack (method, statement, arguments, keyword arguments) +- ``StackResult`` – wraps the driver’s raw result while surfacing stack metadata (rowcount, warning, error) +- ``AsyncDriverAdapterBase.execute_stack`` / ``SyncDriverAdapterBase.execute_stack`` – adapter hooks that select native pipelines or the sequential fallback + +StatementStack +============== + +.. currentmodule:: sqlspec.core.stack + +.. autoclass:: StatementStack + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: StackOperation + :members: + :undoc-members: + :show-inheritance: + +StackResult +=========== + +.. currentmodule:: sqlspec.core.result + +.. autoclass:: StackResult + :members: + :undoc-members: + :show-inheritance: + +Driver APIs +=========== + +.. currentmodule:: sqlspec.driver._async + +.. automethod:: AsyncDriverAdapterBase.execute_stack + +.. currentmodule:: sqlspec.driver._sync + +.. automethod:: SyncDriverAdapterBase.execute_stack + +Exceptions +========== + +.. currentmodule:: sqlspec.exceptions + +.. autoclass:: StackExecutionError + :members: + :undoc-members: + :show-inheritance: + +Usage Highlights +================ + +- Build stacks once and reuse them across requests/tasks. 
+- Call ``session.execute_stack(stack, continue_on_error=False)`` to run fail-fast or set ``continue_on_error=True`` to record per-operation errors.
+- Inspect ``StackResult.raw_result`` to call helpers like ``all()``, ``one()``, ``to_pandas()``, or ``to_arrow()``.
+- :doc:`/reference/adapters` lists per-adapter capabilities, including whether native pipelines or sequential fallback are used for stacks.
diff --git a/docs/usage/index.rst b/docs/usage/index.rst
index 3ad038383..54894963e 100644
--- a/docs/usage/index.rst
+++ b/docs/usage/index.rst
@@ -33,6 +33,9 @@ SQLSpec provides a unified interface for database operations across multiple bac
 **Query Builder**
    Explore the experimental fluent API for programmatically constructing SQL queries.

+**Query Stack**
+   Learn how to batch heterogeneous SQL statements with immutable stacks, choose between fail-fast and continue-on-error execution, and monitor native vs. sequential paths in :doc:`/reference/query-stack`.
+
 **SQL Files**
    Manage SQL statements from files using the aiosql-style loader.

diff --git a/specs/guides/query-stack.md b/specs/guides/query-stack.md
new file mode 100644
index 000000000..a5310a7ee
--- /dev/null
+++ b/specs/guides/query-stack.md
@@ -0,0 +1,67 @@
+# Query Stack Guide
+
+Query Stack executes multiple SQL statements in a single driver call while preserving raw SQL semantics. Each stack is immutable, MyPy-friendly, and can be shared across asyncio tasks or worker threads without synchronization.
+
+## When to Use Query Stack
+
+- Multi-step workflows (audit insert + update + permission read) that would otherwise require multiple round-trips.
+- Adapter-specific native pipelines (Oracle 23ai+, psycopg pipeline mode, asyncpg batch execution) where batching reduces latency.
+- Sequential fallback adapters (SQLite, DuckDB, BigQuery, ADBC, AsyncMy) when you still want the ergonomic benefits of a single API call.
+- Continue-on-error workflows that need to run every statement but report failures alongside successful operations.
+
+## Building StatementStack Instances
+
+1. Start with an empty stack: `stack = StatementStack()`.
+2. Add operations via the push helpers (each returns a new instance):
+   - `.push_execute(sql, parameters, *, statement_config=None, **kwargs)`
+   - `.push_execute_many(sql, parameter_sets, *filters, statement_config=None, **kwargs)`
+   - `.push_execute_script(sql, *filters, statement_config=None, **kwargs)`
+   - `.push_execute_arrow(sql, *filters, statement_config=None, **kwargs)`
+3. Use `.extend()` or `StatementStack.from_operations()` to combine stacks.
+4. Store stacks at module scope or in factory functions—the tuple-based storage makes them hashable and thread-safe.
+
+## Execution Modes
+
+`Session.execute_stack(stack, continue_on_error=False)` mirrors the driver’s transaction rules:
+
+- **Fail-fast (default):** The driver creates a transaction if one is not already active. Any failure raises `StackExecutionError` and rolls back the transaction.
+- **Continue-on-error:** Each operation commits immediately. Failures are wrapped in `StackExecutionError` and recorded on the corresponding `StackResult`, and execution continues with the remaining operations.
+
+When using adapters with native pipelines (Oracle, psycopg, asyncpg), continue-on-error downgrades to sequential mode if the native API cannot honor the semantics (e.g., psycopg pipeline requires fail-fast).
+
+## Transaction Boundaries
+
+- Existing transactions are respected—`execute_stack()` never commits or rolls back a transaction it did not create.
+- For fail-fast stacks, drivers call `begin()`/`commit()` (or `rollback()` on error) only when no transaction is active. +- Continue-on-error uses commit/rollback hooks after each operation to keep the connection clean. + +## Arrow Operations + +`push_execute_arrow()` delegates to `select_to_arrow()` when the adapter implements Arrow support (DuckDB, BigQuery, ADBC, etc.). The returned `StackResult.raw_result` is an `ArrowResult`, so downstream helpers like `to_pandas()` or `to_polars()` continue to work. + +## Telemetry and Tracing + +Every stack execution routes through `StackExecutionObserver`, which provides: + +- `StackExecutionMetrics`: increments `stack.execute.*` counters (invocations, statements, partial errors, duration) for any observability runtime (built-in logger, OTLP exporter, Prometheus bridge, etc.). +- `sqlspec.stack.execute` tracing spans containing adapter, statement count, native pipeline flag, continue-on-error, and hashed SQL identifiers. +- Structured DEBUG/ERROR logs (`stack.execute.start`, `stack.execute.complete`, `stack.execute.failed`). + +Adapters only need to report whether they used a native pipeline; the observer handles the rest. + +## Troubleshooting + +| Symptom | Cause | Fix | +| --- | --- | --- | +| `ValueError: Cannot execute an empty StatementStack` | Stack has zero operations | Ensure you push at least one statement before calling `execute_stack()` | +| `StackExecutionError(operation_index=1, ...)` | Driver error on a specific statement | Inspect `StackResult.error` to see the wrapped exception; use `StackResult.raw_result` to inspect partial data | +| `push_execute_many` raising `TypeError` | Parameter payload not a sequence | Pass an actual list/tuple of parameter sets | +| Continue-on-error seems to run sequentially on psycopg | Psycopg pipeline mode does not support partial failures | Expected—SQLSpec downgrades to sequential mode automatically | + +## Related Resources + +- [Query Stack API Reference](/reference/query-stack) +- :doc:`/examples/query_stack_example` +- [Adapter Guides](/guides/adapters/) for native vs. fallback behavior per database + +Use the new :doc:`/reference/query-stack` page for low-level API details and :doc:`/examples/query_stack_example` to see the end-to-end workflow. 
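+
+## Worked Example
+
+A condensed, fail-fast sketch of the flow end to end (the synchronous `session` object and the `audit_log`/`user_permissions` schema are assumptions for illustration):
+
+```python
+from sqlspec.core import StatementStack
+
+stack = (
+    StatementStack()
+    .push_execute("INSERT INTO audit_log (action) VALUES (:action)", {"action": "login"})
+    .push_execute("SELECT permissions FROM user_permissions WHERE user_id = :id", {"id": 1})
+)
+
+# Fail-fast by default: any failure raises StackExecutionError and rolls back.
+audit_insert, role_select = session.execute_stack(stack)
+print("rows inserted:", audit_insert.rowcount)
+if role_select.raw_result is not None:
+    print("permissions:", role_select.raw_result.data)
+```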
diff --git a/sqlspec/adapters/asyncpg/__init__.py b/sqlspec/adapters/asyncpg/__init__.py index b504fa9ba..afe1934ba 100644 --- a/sqlspec/adapters/asyncpg/__init__.py +++ b/sqlspec/adapters/asyncpg/__init__.py @@ -1,6 +1,6 @@ """AsyncPG adapter for SQLSpec.""" -from sqlspec.adapters.asyncpg._types import AsyncpgConnection, AsyncpgPool +from sqlspec.adapters.asyncpg._types import AsyncpgConnection, AsyncpgPool, AsyncpgPreparedStatement from sqlspec.adapters.asyncpg.config import AsyncpgConfig, AsyncpgConnectionConfig, AsyncpgPoolConfig from sqlspec.adapters.asyncpg.driver import ( AsyncpgCursor, @@ -18,5 +18,6 @@ "AsyncpgExceptionHandler", "AsyncpgPool", "AsyncpgPoolConfig", + "AsyncpgPreparedStatement", "asyncpg_statement_config", ) diff --git a/sqlspec/adapters/asyncpg/_types.py b/sqlspec/adapters/asyncpg/_types.py index b55f48359..f33b8ad03 100644 --- a/sqlspec/adapters/asyncpg/_types.py +++ b/sqlspec/adapters/asyncpg/_types.py @@ -6,16 +6,18 @@ from typing import TypeAlias from asyncpg import Connection, Pool, Record + from asyncpg.prepared_stmt import PreparedStatement - -if TYPE_CHECKING: AsyncpgConnection: TypeAlias = Connection[Record] | PoolConnectionProxy[Record] AsyncpgPool: TypeAlias = Pool[Record] + AsyncpgPreparedStatement: TypeAlias = PreparedStatement[Record] else: from asyncpg import Pool + from asyncpg.prepared_stmt import PreparedStatement AsyncpgConnection = PoolConnectionProxy AsyncpgPool = Pool + AsyncpgPreparedStatement = PreparedStatement -__all__ = ("AsyncpgConnection", "AsyncpgPool") +__all__ = ("AsyncpgConnection", "AsyncpgPool", "AsyncpgPreparedStatement") diff --git a/sqlspec/adapters/asyncpg/config.py b/sqlspec/adapters/asyncpg/config.py index a9af9dd39..21c95d7ff 100644 --- a/sqlspec/adapters/asyncpg/config.py +++ b/sqlspec/adapters/asyncpg/config.py @@ -12,7 +12,7 @@ from typing_extensions import NotRequired from sqlspec.adapters.asyncpg._type_handlers import register_json_codecs, register_pgvector_support -from sqlspec.adapters.asyncpg._types import AsyncpgConnection, AsyncpgPool +from sqlspec.adapters.asyncpg._types import AsyncpgConnection, AsyncpgPool, AsyncpgPreparedStatement from sqlspec.adapters.asyncpg.driver import ( AsyncpgCursor, AsyncpgDriver, @@ -459,5 +459,6 @@ def get_signature_namespace(self) -> "dict[str, Any]": "AsyncpgExceptionHandler": AsyncpgExceptionHandler, "AsyncpgPool": AsyncpgPool, "AsyncpgPoolConfig": AsyncpgPoolConfig, + "AsyncpgPreparedStatement": AsyncpgPreparedStatement, }) return namespace diff --git a/sqlspec/adapters/asyncpg/driver.py b/sqlspec/adapters/asyncpg/driver.py index 263c5755d..f67864eb4 100644 --- a/sqlspec/adapters/asyncpg/driver.py +++ b/sqlspec/adapters/asyncpg/driver.py @@ -2,20 +2,26 @@ import datetime import re -from typing import TYPE_CHECKING, Any, Final, cast +from collections import OrderedDict +from typing import TYPE_CHECKING, Any, Final, NamedTuple, cast import asyncpg from sqlspec.core import ( DriverParameterProfile, ParameterStyle, + StackOperation, + StackResult, + StatementStack, build_statement_config_from_profile, + create_sql_result, get_cache_config, is_copy_from_operation, is_copy_operation, register_driver_profile, ) from sqlspec.driver import AsyncDriverAdapterBase +from sqlspec.driver._common import StackExecutionObserver, describe_stack_statement from sqlspec.exceptions import ( CheckViolationError, DatabaseConnectionError, @@ -26,6 +32,7 @@ OperationalError, SQLParsingError, SQLSpecError, + StackExecutionError, TransactionError, UniqueViolationError, ) @@ -36,7 +43,7 @@ from 
collections.abc import Callable from contextlib import AbstractAsyncContextManager - from sqlspec.adapters.asyncpg._types import AsyncpgConnection + from sqlspec.adapters.asyncpg._types import AsyncpgConnection, AsyncpgPreparedStatement from sqlspec.core import SQL, ArrowResult, ParameterStyleConfig, SQLResult, StatementConfig from sqlspec.driver import AsyncDataDictionaryBase, ExecutionResult from sqlspec.storage import ( @@ -59,6 +66,15 @@ logger = get_logger("adapters.asyncpg") +class _NormalizedStackOperation(NamedTuple): + """Normalized execution metadata used for prepared stack operations.""" + + operation: "StackOperation" + statement: "SQL" + sql: str + parameters: "tuple[Any, ...] | dict[str, Any] | None" + + ASYNC_PG_STATUS_REGEX: Final[re.Pattern[str]] = re.compile(r"^([A-Z]+)(?:\s+(\d+))?\s+(\d+)$", re.IGNORECASE) EXPECTED_REGEX_GROUPS: Final[int] = 3 @@ -178,6 +194,9 @@ def _raise_generic_error(self, e: Any, code: "str | None") -> None: raise SQLSpecError(msg) from e +PREPARED_STATEMENT_CACHE_SIZE: Final[int] = 32 + + class AsyncpgDriver(AsyncDriverAdapterBase): """AsyncPG PostgreSQL driver for async database operations. @@ -186,7 +205,7 @@ class AsyncpgDriver(AsyncDriverAdapterBase): and caching, and parameter processing with type coercion. """ - __slots__ = ("_data_dictionary",) + __slots__ = ("_data_dictionary", "_prepared_statements") dialect = "postgres" def __init__( @@ -206,6 +225,7 @@ def __init__( super().__init__(connection=connection, statement_config=statement_config, driver_features=driver_features) self._data_dictionary: AsyncDataDictionaryBase | None = None + self._prepared_statements: OrderedDict[str, AsyncpgPreparedStatement] = OrderedDict() def with_cursor(self, connection: "AsyncpgConnection") -> "AsyncpgCursor": """Create context manager for AsyncPG cursor.""" @@ -312,6 +332,62 @@ async def _execute_many(self, cursor: "AsyncpgConnection", statement: "SQL") -> return self.create_execution_result(cursor, rowcount_override=affected_rows, is_many_result=True) + async def execute_stack( + self, stack: "StatementStack", *, continue_on_error: bool = False + ) -> "tuple[StackResult, ...]": + """Execute a StatementStack using asyncpg's rapid batching.""" + + if not isinstance(stack, StatementStack) or not stack or self.stack_native_disabled: + return await super().execute_stack(stack, continue_on_error=continue_on_error) + + return await self._execute_stack_native(stack, continue_on_error=continue_on_error) + + async def _execute_stack_native( + self, stack: "StatementStack", *, continue_on_error: bool + ) -> "tuple[StackResult, ...]": + results: list[StackResult] = [] + + async def _run_operations(observer: StackExecutionObserver) -> None: + for index, operation in enumerate(stack.operations): + try: + normalized = None + if operation.method == "execute": + normalized = self._normalize_stack_execute_operation(operation) + + if normalized is not None and self._can_prepare_stack_operation(normalized): + stack_result = await self._execute_stack_operation_prepared(normalized) + else: + raw_result = await self._execute_stack_operation(operation) + stack_result = StackResult(raw_result=raw_result) + except Exception as exc: + stack_error = StackExecutionError( + index, + describe_stack_statement(operation.statement), + exc, + adapter=type(self).__name__, + mode="continue-on-error" if continue_on_error else "fail-fast", + ) + if continue_on_error: + observer.record_operation_error(stack_error) + results.append(StackResult.from_error(stack_error)) + continue + raise 
stack_error from exc + + results.append(stack_result) + + transaction_cm = None + if not continue_on_error and not self._connection_in_transaction(): + transaction_cm = self.connection.transaction() + + with StackExecutionObserver(self, stack, continue_on_error, native_pipeline=True) as observer: + if transaction_cm is not None: + async with transaction_cm: + await _run_operations(observer) + else: + await _run_operations(observer) + + return tuple(results) + async def _execute_statement(self, cursor: "AsyncpgConnection", statement: "SQL") -> "ExecutionResult": """Execute single SQL statement. @@ -342,6 +418,66 @@ async def _execute_statement(self, cursor: "AsyncpgConnection", statement: "SQL" return self.create_execution_result(cursor, rowcount_override=affected_rows) + def _can_prepare_stack_operation(self, normalized: "_NormalizedStackOperation") -> bool: + statement = normalized.statement + return not statement.is_script and not statement.is_many + + async def _execute_stack_operation_prepared(self, normalized: "_NormalizedStackOperation") -> StackResult: + prepared = await self._get_prepared_statement(normalized.sql) + metadata = {"prepared_statement": True} + + if normalized.statement.returns_rows(): + rows = await self._invoke_prepared(prepared, normalized.parameters, fetch=True) + data = [dict(row) for row in rows] + sql_result = create_sql_result(normalized.statement, data=data, rows_affected=len(data), metadata=metadata) + return StackResult.from_sql_result(sql_result) + + status = await self._invoke_prepared(prepared, normalized.parameters, fetch=False) + rowcount = self._parse_asyncpg_status(status) if isinstance(status, str) else 0 + sql_result = create_sql_result(normalized.statement, rows_affected=rowcount, metadata=metadata) + return StackResult.from_sql_result(sql_result) + + def _normalize_stack_execute_operation(self, operation: "StackOperation") -> "_NormalizedStackOperation": + if operation.method != "execute": + msg = "Prepared execution only supports execute operations" + raise TypeError(msg) + + kwargs = dict(operation.keyword_arguments) if operation.keyword_arguments else {} + statement_config = kwargs.pop("statement_config", None) + config = statement_config or self.statement_config + + sql_statement = self.prepare_statement( + operation.statement, operation.arguments, statement_config=config, kwargs=kwargs + ) + sql_text, prepared_parameters = self._get_compiled_sql(sql_statement, config) + return _NormalizedStackOperation( + operation=operation, statement=sql_statement, sql=sql_text, parameters=prepared_parameters + ) + + async def _invoke_prepared( + self, + prepared: "AsyncpgPreparedStatement", + parameters: "tuple[Any, ...] 
| dict[str, Any] | list[Any] | None", + *, + fetch: bool, + ) -> Any: + if parameters is None: + if fetch: + return await prepared.fetch() + await prepared.fetch() + return prepared.get_statusmsg() + + if isinstance(parameters, dict): + if fetch: + return await prepared.fetch(**parameters) + await prepared.fetch(**parameters) + return prepared.get_statusmsg() + + if fetch: + return await prepared.fetch(*parameters) + await prepared.fetch(*parameters) + return prepared.get_statusmsg() + async def select_to_storage( self, statement: "SQL | str", @@ -455,6 +591,18 @@ async def commit(self) -> None: msg = f"Failed to commit async transaction: {e}" raise SQLSpecError(msg) from e + async def _get_prepared_statement(self, sql: str) -> "AsyncpgPreparedStatement": + cached = self._prepared_statements.get(sql) + if cached is not None: + self._prepared_statements.move_to_end(sql) + return cached + + prepared = cast("AsyncpgPreparedStatement", await self.connection.prepare(sql)) + self._prepared_statements[sql] = prepared + if len(self._prepared_statements) > PREPARED_STATEMENT_CACHE_SIZE: + self._prepared_statements.popitem(last=False) + return prepared + @property def data_dictionary(self) -> "AsyncDataDictionaryBase": """Get the data dictionary for this driver. diff --git a/sqlspec/adapters/oracledb/driver.py b/sqlspec/adapters/oracledb/driver.py index aa639fa6d..62cc8dd08 100644 --- a/sqlspec/adapters/oracledb/driver.py +++ b/sqlspec/adapters/oracledb/driver.py @@ -3,7 +3,7 @@ import contextlib import logging import re -from typing import TYPE_CHECKING, Any, Final, cast +from typing import TYPE_CHECKING, Any, Final, NamedTuple, cast import oracledb from oracledb import AsyncCursor, Cursor @@ -15,9 +15,12 @@ SQL, DriverParameterProfile, ParameterStyle, + StackResult, StatementConfig, + StatementStack, build_statement_config_from_profile, create_arrow_result, + create_sql_result, get_cache_config, register_driver_profile, ) @@ -27,6 +30,7 @@ SyncDataDictionaryBase, SyncDriverAdapterBase, ) +from sqlspec.driver._common import StackExecutionObserver, VersionInfo, describe_stack_statement, hash_stack_operations from sqlspec.exceptions import ( CheckViolationError, DatabaseConnectionError, @@ -37,17 +41,22 @@ OperationalError, SQLParsingError, SQLSpecError, + StackExecutionError, TransactionError, UniqueViolationError, ) +from sqlspec.utils.logging import log_with_context from sqlspec.utils.module_loader import ensure_pyarrow from sqlspec.utils.serializers import to_json if TYPE_CHECKING: + from collections.abc import Sequence from contextlib import AbstractAsyncContextManager, AbstractContextManager + from typing import Protocol from sqlspec.builder import QueryBuilder - from sqlspec.core import ArrowResult, SQLResult, Statement, StatementFilter + from sqlspec.core import ArrowResult, SQLResult, Statement, StatementConfig, StatementFilter + from sqlspec.core.stack import StackOperation from sqlspec.driver import ExecutionResult from sqlspec.storage import ( AsyncStoragePipeline, @@ -59,6 +68,22 @@ ) from sqlspec.typing import ArrowReturnFormat, StatementParameters + class _PipelineDriver(Protocol): + statement_config: StatementConfig + driver_features: "dict[str, Any]" + + def prepare_statement( + self, + statement: "str | Statement | QueryBuilder", + parameters: "tuple[Any, ...] | dict[str, Any] | None", + *, + statement_config: StatementConfig, + kwargs: "dict[str, Any]", + ) -> SQL: ... + + def _get_compiled_sql(self, statement: SQL, statement_config: StatementConfig) -> "tuple[str, Any]": ... 
+ + logger = logging.getLogger(__name__) # Oracle-specific constants @@ -77,6 +102,215 @@ "oracledb_statement_config", ) +PIPELINE_MIN_DRIVER_VERSION: Final[tuple[int, int, int]] = (2, 4, 0) +PIPELINE_MIN_DATABASE_MAJOR: Final[int] = 23 +_VERSION_COMPONENTS: Final[int] = 3 + + +def _parse_version_tuple(version: str) -> "tuple[int, int, int]": + parts = [int(part) for part in version.split(".") if part.isdigit()] + while len(parts) < _VERSION_COMPONENTS: + parts.append(0) + return parts[0], parts[1], parts[2] + + +_ORACLEDB_VERSION: Final[tuple[int, int, int]] = _parse_version_tuple(getattr(oracledb, "__version__", "0.0.0")) + + +class _CompiledStackOperation(NamedTuple): + statement: SQL + sql: str + parameters: Any + method: str + returns_rows: bool + summary: str + + +class OraclePipelineMixin: + """Shared helpers for Oracle pipeline execution.""" + + __slots__ = () + + def _pipeline_driver(self) -> "_PipelineDriver": + return cast("_PipelineDriver", self) + + def _stack_native_blocker(self, stack: "StatementStack") -> "str | None": + for operation in stack.operations: + if operation.method == "execute_arrow": + return "arrow_operation" + if operation.method == "execute_script": + return "script_operation" + return None + + def _log_pipeline_skip(self, reason: str, stack: "StatementStack") -> None: + log_level = logging.INFO if reason == "env_override" else logging.DEBUG + log_with_context( + logger, + log_level, + "stack.native_pipeline.skip", + driver=type(self).__name__, + reason=reason, + hashed_operations=hash_stack_operations(stack), + ) + + def _prepare_pipeline_operation(self, operation: "StackOperation") -> _CompiledStackOperation: + driver = self._pipeline_driver() + kwargs = dict(operation.keyword_arguments) if operation.keyword_arguments else {} + statement_config = kwargs.pop("statement_config", None) + config = statement_config or driver.statement_config + + if operation.method == "execute": + sql_statement = driver.prepare_statement( + operation.statement, operation.arguments, statement_config=config, kwargs=kwargs + ) + elif operation.method == "execute_many": + if not operation.arguments: + msg = "execute_many stack operation requires parameter sets" + raise ValueError(msg) + parameter_sets = operation.arguments[0] + filters = operation.arguments[1:] + sql_statement = self._build_execute_many_statement( + operation.statement, parameter_sets, filters, config, kwargs + ) + else: + msg = f"Unsupported stack operation method: {operation.method}" + raise ValueError(msg) + + compiled_sql, prepared_parameters = driver._get_compiled_sql( # pyright: ignore[reportPrivateUsage] + sql_statement, config + ) + summary = describe_stack_statement(operation.statement) + return _CompiledStackOperation( + statement=sql_statement, + sql=compiled_sql, + parameters=prepared_parameters, + method=operation.method, + returns_rows=sql_statement.returns_rows(), + summary=summary, + ) + + def _build_execute_many_statement( + self, + statement: "str | Statement | QueryBuilder", + parameter_sets: "Sequence[StatementParameters]", + filters: "tuple[StatementParameters | StatementFilter, ...]", + statement_config: "StatementConfig", + kwargs: "dict[str, Any]", + ) -> SQL: + driver = self._pipeline_driver() + if isinstance(statement, SQL): + return SQL(statement.raw_sql, parameter_sets, statement_config=statement_config, is_many=True, **kwargs) + + base_statement = driver.prepare_statement(statement, filters, statement_config=statement_config, kwargs=kwargs) + return SQL(base_statement.raw_sql, 
parameter_sets, statement_config=statement_config, is_many=True, **kwargs) + + def _add_pipeline_operation(self, pipeline: Any, operation: _CompiledStackOperation) -> None: + parameters = operation.parameters or [] + if operation.method == "execute": + if operation.returns_rows: + pipeline.add_fetchall(operation.sql, parameters) + else: + pipeline.add_execute(operation.sql, parameters) + return + + if operation.method == "execute_many": + pipeline.add_executemany(operation.sql, parameters) + return + + msg = f"Unsupported pipeline operation: {operation.method}" + raise ValueError(msg) + + def _build_stack_results_from_pipeline( + self, + compiled_operations: "Sequence[_CompiledStackOperation]", + pipeline_results: "Sequence[Any]", + continue_on_error: bool, + observer: StackExecutionObserver, + ) -> "list[StackResult]": + stack_results: list[StackResult] = [] + for index, (compiled, result) in enumerate(zip(compiled_operations, pipeline_results, strict=False)): + error = getattr(result, "error", None) + if error is not None: + stack_error = StackExecutionError( + index, + compiled.summary, + error, + adapter=type(self).__name__, + mode="continue-on-error" if continue_on_error else "fail-fast", + ) + if continue_on_error: + observer.record_operation_error(stack_error) + stack_results.append(StackResult.from_error(stack_error)) + continue + raise stack_error + + stack_results.append(self._pipeline_result_to_stack_result(compiled, result)) + return stack_results + + def _pipeline_result_to_stack_result(self, operation: _CompiledStackOperation, pipeline_result: Any) -> StackResult: + rows = getattr(pipeline_result, "rows", None) + columns = getattr(pipeline_result, "columns", None) + data = self._rows_from_pipeline_result(columns, rows) if operation.returns_rows else None + metadata: dict[str, Any] = {"pipeline_operation": operation.method} + + warning = getattr(pipeline_result, "warning", None) + if warning is not None: + metadata["warning"] = warning + + return_value = getattr(pipeline_result, "return_value", None) + if return_value is not None: + metadata["return_value"] = return_value + + rowcount = self._rows_affected_from_pipeline(operation, pipeline_result, data) + sql_result = create_sql_result(operation.statement, data=data, rows_affected=rowcount, metadata=metadata) + return StackResult.from_sql_result(sql_result) + + def _rows_affected_from_pipeline( + self, operation: _CompiledStackOperation, pipeline_result: Any, data: "list[dict[str, Any]] | None" + ) -> int: + rowcount = getattr(pipeline_result, "rowcount", None) + if isinstance(rowcount, int) and rowcount >= 0: + return rowcount + if operation.method == "execute_many": + parameter_sets = operation.parameters or () + try: + return len(parameter_sets) + except TypeError: + return 0 + if operation.method == "execute" and not operation.returns_rows: + return 1 + if operation.returns_rows: + return len(data or []) + return 0 + + def _rows_from_pipeline_result(self, columns: Any, rows: Any) -> "list[dict[str, Any]]": + if not rows: + return [] + + driver = self._pipeline_driver() + if columns: + names = [getattr(column, "name", f"column_{index}") for index, column in enumerate(columns)] + else: + first = rows[0] + names = [f"column_{index}" for index in range(len(first) if hasattr(first, "__len__") else 0)] + names = _normalize_column_names(names, driver.driver_features) + + normalized_rows: list[dict[str, Any]] = [] + for row in rows: + if isinstance(row, dict): + normalized_rows.append(row) + continue + 
normalized_rows.append(dict(zip(names, row, strict=False))) + return normalized_rows + + def _wrap_pipeline_error( + self, error: Exception, stack: "StatementStack", continue_on_error: bool + ) -> StackExecutionError: + mode = "continue-on-error" if continue_on_error else "fail-fast" + return StackExecutionError( + -1, "Oracle pipeline execution failed", error, adapter=type(self).__name__, mode=mode + ) + def _normalize_column_names(column_names: "list[str]", driver_features: "dict[str, Any]") -> "list[str]": should_lowercase = driver_features.get("enable_lowercase_column_names", False) @@ -333,14 +567,14 @@ async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self._map_oracle_exception(exc_val) -class OracleSyncDriver(SyncDriverAdapterBase): +class OracleSyncDriver(OraclePipelineMixin, SyncDriverAdapterBase): """Synchronous Oracle Database driver. Provides Oracle Database connectivity with parameter style conversion, error handling, and transaction management. """ - __slots__ = ("_data_dictionary",) + __slots__ = ("_data_dictionary", "_oracle_version", "_pipeline_support", "_pipeline_support_reason") dialect = "oracle" def __init__( @@ -360,6 +594,9 @@ def __init__( super().__init__(connection=connection, statement_config=statement_config, driver_features=driver_features) self._data_dictionary: SyncDataDictionaryBase | None = None + self._pipeline_support: bool | None = None + self._pipeline_support_reason: str | None = None + self._oracle_version: VersionInfo | None = None def with_cursor(self, connection: OracleSyncConnection) -> OracleSyncCursor: """Create context manager for Oracle cursor. @@ -418,6 +655,23 @@ def _execute_script(self, cursor: Any, statement: "SQL") -> "ExecutionResult": last_cursor, statement_count=len(statements), successful_statements=successful_count, is_script_result=True ) + def execute_stack(self, stack: "StatementStack", *, continue_on_error: bool = False) -> "tuple[StackResult, ...]": + """Execute a StatementStack using Oracle's pipeline when available.""" + + if not isinstance(stack, StatementStack) or not stack: + return super().execute_stack(stack, continue_on_error=continue_on_error) + + blocker = self._stack_native_blocker(stack) + if blocker is not None: + self._log_pipeline_skip(blocker, stack) + return super().execute_stack(stack, continue_on_error=continue_on_error) + + if not self._pipeline_native_supported(): + self._log_pipeline_skip(self._pipeline_support_reason or "database_version", stack) + return super().execute_stack(stack, continue_on_error=continue_on_error) + + return self._execute_stack_native(stack, continue_on_error=continue_on_error) + def _execute_many(self, cursor: Any, statement: "SQL") -> "ExecutionResult": """Execute SQL with multiple parameter sets using Oracle batch processing. 
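For orientation, here is a minimal sketch of the python-oracledb pipeline API that the stack overrides in this patch build on. The connection details and table names are placeholders, and the sketch assumes thin-mode asyncio with python-oracledb 2.4 or newer:

```python
import asyncio

import oracledb


async def main() -> None:
    # Placeholder credentials/DSN for illustration only.
    conn = await oracledb.connect_async(user="app", password="app", dsn="localhost/freepdb1")

    # Queue heterogeneous operations, then submit them in a single round-trip.
    pipeline = oracledb.create_pipeline()
    pipeline.add_execute("UPDATE users SET active = 1 WHERE id = :1", [42])
    pipeline.add_fetchall("SELECT role FROM user_roles WHERE user_id = :1", [42])

    # run_pipeline returns one PipelineOpResult per queued operation, in order.
    for op_result in await conn.run_pipeline(pipeline):
        print(op_result.rows)

    await conn.close()


asyncio.run(main())
```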
@@ -450,6 +704,38 @@ def _execute_many(self, cursor: Any, statement: "SQL") -> "ExecutionResult": return self.create_execution_result(cursor, rowcount_override=affected_rows, is_many_result=True) + def _execute_stack_native(self, stack: "StatementStack", *, continue_on_error: bool) -> "tuple[StackResult, ...]": + compiled_operations = [self._prepare_pipeline_operation(op) for op in stack.operations] + pipeline = oracledb.create_pipeline() + for compiled in compiled_operations: + self._add_pipeline_operation(pipeline, compiled) + + results: list[StackResult] = [] + started_transaction = False + + with StackExecutionObserver(self, stack, continue_on_error, native_pipeline=True) as observer: + try: + if not continue_on_error and not self._connection_in_transaction(): + self.begin() + started_transaction = True + + pipeline_results = self.connection.run_pipeline(pipeline, continue_on_error=continue_on_error) + results = self._build_stack_results_from_pipeline( + compiled_operations, pipeline_results, continue_on_error, observer + ) + + if started_transaction: + self.commit() + except Exception as exc: + if started_transaction: + try: + self.rollback() + except Exception as rollback_error: # pragma: no cover - diagnostics only + logger.debug("Rollback after pipeline failure failed: %s", rollback_error) + raise self._wrap_pipeline_error(exc, stack, continue_on_error) from exc + + return tuple(results) + def _execute_statement(self, cursor: Any, statement: "SQL") -> "ExecutionResult": """Execute single SQL statement with Oracle data handling. @@ -512,6 +798,45 @@ def select_to_storage( self._attach_partition_telemetry(telemetry_payload, partitioner) return self._create_storage_job(telemetry_payload, telemetry) + def _detect_oracle_version(self) -> "VersionInfo | None": + if self._oracle_version is not None: + return self._oracle_version + version = self.data_dictionary.get_version(self) + self._oracle_version = version + return version + + def _detect_oracledb_version(self) -> "tuple[int, int, int]": + return _ORACLEDB_VERSION + + def _pipeline_native_supported(self) -> bool: + if self._pipeline_support is not None: + return self._pipeline_support + + if self.stack_native_disabled: + self._pipeline_support = False + self._pipeline_support_reason = "env_override" + return False + + if self._detect_oracledb_version() < PIPELINE_MIN_DRIVER_VERSION: + self._pipeline_support = False + self._pipeline_support_reason = "driver_version" + return False + + if not hasattr(self.connection, "run_pipeline"): + self._pipeline_support = False + self._pipeline_support_reason = "driver_api_missing" + return False + + version_info = self._detect_oracle_version() + if version_info and version_info.major >= PIPELINE_MIN_DATABASE_MAJOR: + self._pipeline_support = True + self._pipeline_support_reason = None + return True + + self._pipeline_support = False + self._pipeline_support_reason = "database_version" + return False + def load_from_arrow( self, table: str, @@ -688,14 +1013,14 @@ def _truncate_table_sync(self, table: str) -> None: self.connection.execute(statement) -class OracleAsyncDriver(AsyncDriverAdapterBase): +class OracleAsyncDriver(OraclePipelineMixin, AsyncDriverAdapterBase): """Asynchronous Oracle Database driver. Provides Oracle Database connectivity with parameter style conversion, error handling, and transaction management for async operations. 
""" - __slots__ = ("_data_dictionary",) + __slots__ = ("_data_dictionary", "_oracle_version", "_pipeline_support", "_pipeline_support_reason") dialect = "oracle" def __init__( @@ -715,6 +1040,9 @@ def __init__( super().__init__(connection=connection, statement_config=statement_config, driver_features=driver_features) self._data_dictionary: AsyncDataDictionaryBase | None = None + self._pipeline_support: bool | None = None + self._pipeline_support_reason: str | None = None + self._oracle_version: VersionInfo | None = None def with_cursor(self, connection: OracleAsyncConnection) -> OracleAsyncCursor: """Create context manager for Oracle cursor. @@ -773,6 +1101,25 @@ async def _execute_script(self, cursor: Any, statement: "SQL") -> "ExecutionResu last_cursor, statement_count=len(statements), successful_statements=successful_count, is_script_result=True ) + async def execute_stack( + self, stack: "StatementStack", *, continue_on_error: bool = False + ) -> "tuple[StackResult, ...]": + """Execute a StatementStack using Oracle's pipeline when available.""" + + if not isinstance(stack, StatementStack) or not stack: + return await super().execute_stack(stack, continue_on_error=continue_on_error) + + blocker = self._stack_native_blocker(stack) + if blocker is not None: + self._log_pipeline_skip(blocker, stack) + return await super().execute_stack(stack, continue_on_error=continue_on_error) + + if not await self._pipeline_native_supported(): + self._log_pipeline_skip(self._pipeline_support_reason or "database_version", stack) + return await super().execute_stack(stack, continue_on_error=continue_on_error) + + return await self._execute_stack_native(stack, continue_on_error=continue_on_error) + async def _execute_many(self, cursor: Any, statement: "SQL") -> "ExecutionResult": """Execute SQL with multiple parameter sets using Oracle batch processing. 
@@ -800,6 +1147,79 @@ async def _execute_many(self, cursor: Any, statement: "SQL") -> "ExecutionResult return self.create_execution_result(cursor, rowcount_override=affected_rows, is_many_result=True) + async def _execute_stack_native( + self, stack: "StatementStack", *, continue_on_error: bool + ) -> "tuple[StackResult, ...]": + compiled_operations = [self._prepare_pipeline_operation(op) for op in stack.operations] + pipeline = oracledb.create_pipeline() + for compiled in compiled_operations: + self._add_pipeline_operation(pipeline, compiled) + + results: list[StackResult] = [] + started_transaction = False + + with StackExecutionObserver(self, stack, continue_on_error, native_pipeline=True) as observer: + try: + if not continue_on_error and not self._connection_in_transaction(): + await self.begin() + started_transaction = True + + pipeline_results = await self.connection.run_pipeline(pipeline, continue_on_error=continue_on_error) + results = self._build_stack_results_from_pipeline( + compiled_operations, pipeline_results, continue_on_error, observer + ) + + if started_transaction: + await self.commit() + except Exception as exc: + if started_transaction: + try: + await self.rollback() + except Exception as rollback_error: # pragma: no cover - diagnostics only + logger.debug("Rollback after pipeline failure failed: %s", rollback_error) + raise self._wrap_pipeline_error(exc, stack, continue_on_error) from exc + + return tuple(results) + + async def _pipeline_native_supported(self) -> bool: + if self._pipeline_support is not None: + return self._pipeline_support + + if self.stack_native_disabled: + self._pipeline_support = False + self._pipeline_support_reason = "env_override" + return False + + if self._detect_oracledb_version() < PIPELINE_MIN_DRIVER_VERSION: + self._pipeline_support = False + self._pipeline_support_reason = "driver_version" + return False + + if not hasattr(self.connection, "run_pipeline"): + self._pipeline_support = False + self._pipeline_support_reason = "driver_api_missing" + return False + + version_info = await self._detect_oracle_version() + if version_info and version_info.major >= PIPELINE_MIN_DATABASE_MAJOR: + self._pipeline_support = True + self._pipeline_support_reason = None + return True + + self._pipeline_support = False + self._pipeline_support_reason = "database_version" + return False + + async def _detect_oracle_version(self) -> "VersionInfo | None": + if self._oracle_version is not None: + return self._oracle_version + version = await self.data_dictionary.get_version(self) + self._oracle_version = version + return version + + def _detect_oracledb_version(self) -> "tuple[int, int, int]": + return _ORACLEDB_VERSION + async def _execute_statement(self, cursor: Any, statement: "SQL") -> "ExecutionResult": """Execute single SQL statement with Oracle data handling. diff --git a/sqlspec/adapters/psycopg/driver.py b/sqlspec/adapters/psycopg/driver.py index 6d21f35fe..0a2854b69 100644 --- a/sqlspec/adapters/psycopg/driver.py +++ b/sqlspec/adapters/psycopg/driver.py @@ -1,23 +1,9 @@ -"""PostgreSQL psycopg driver implementation. 
- -This driver provides PostgreSQL database connectivity using psycopg3: -- SQL statement execution with parameter binding -- Connection and transaction management -- Row result processing with dictionary-based access -- PostgreSQL-specific features (COPY, arrays, JSON types) - -PostgreSQL Features: -- Parameter styles ($1, %s, %(name)s) -- PostgreSQL array support -- COPY operations for bulk data transfer -- JSON/JSONB type handling -- PostgreSQL-specific error handling -""" +"""PostgreSQL psycopg driver implementation.""" import datetime import io from contextlib import AsyncExitStack, ExitStack -from typing import TYPE_CHECKING, Any, cast +from typing import TYPE_CHECKING, Any, NamedTuple, Protocol, cast import psycopg from psycopg import sql as psycopg_sql @@ -29,7 +15,11 @@ ParameterStyle, ParameterStyleConfig, SQLResult, + StackOperation, + StackResult, + Statement, StatementConfig, + StatementStack, build_statement_config_from_profile, get_cache_config, is_copy_from_operation, @@ -38,6 +28,7 @@ register_driver_profile, ) from sqlspec.driver import AsyncDriverAdapterBase, SyncDriverAdapterBase +from sqlspec.driver._common import StackExecutionObserver, describe_stack_statement from sqlspec.exceptions import ( CheckViolationError, DatabaseConnectionError, @@ -48,6 +39,7 @@ OperationalError, SQLParsingError, SQLSpecError, + StackExecutionError, TransactionError, UniqueViolationError, ) @@ -59,6 +51,7 @@ from collections.abc import Callable from contextlib import AbstractAsyncContextManager, AbstractContextManager + from sqlspec.builder import QueryBuilder from sqlspec.core import ArrowResult from sqlspec.driver._async import AsyncDataDictionaryBase from sqlspec.driver._common import ExecutionResult @@ -72,6 +65,21 @@ SyncStoragePipeline, ) + class _PipelineDriver(Protocol): + statement_config: StatementConfig + + def prepare_statement( + self, + statement: "SQL | Statement | QueryBuilder", + parameters: Any, + *, + statement_config: StatementConfig, + kwargs: dict[str, Any], + ) -> SQL: ... + + def _get_compiled_sql(self, statement: SQL, statement_config: StatementConfig) -> tuple[str, Any]: ... + + __all__ = ( "PsycopgAsyncCursor", "PsycopgAsyncDriver", @@ -86,6 +94,79 @@ logger = get_logger("adapters.psycopg") +def _psycopg_pipeline_supported() -> bool: + """Return True when libpq pipeline support is available.""" + + capabilities = getattr(psycopg, "capabilities", None) + if capabilities is None: + return False + try: + return bool(capabilities.has_pipeline()) + except Exception: # pragma: no cover - defensive guard for unexpected capability implementations + return False + + +class _PreparedStackOperation(NamedTuple): + """Precompiled stack operation metadata for psycopg pipeline execution.""" + + operation_index: int + operation: "StackOperation" + statement: "SQL" + sql: str + parameters: "tuple[Any, ...] 
| dict[str, Any] | None" + + +class _PipelineCursorEntry(NamedTuple): + """Cursor pending result data for psycopg pipeline execution.""" + + prepared: "_PreparedStackOperation" + cursor: Any + + +class PsycopgPipelineMixin: + """Shared helpers for psycopg sync/async pipeline execution.""" + + __slots__ = () + + def _prepare_pipeline_operations(self, stack: "StatementStack") -> "list[_PreparedStackOperation] | None": + prepared: list[_PreparedStackOperation] = [] + for index, operation in enumerate(stack.operations): + normalized = self._normalize_stack_operation_for_pipeline(index, operation) + if normalized is None: + return None + prepared.append(normalized) + return prepared + + def _normalize_stack_operation_for_pipeline( + self, index: int, operation: "StackOperation" + ) -> "_PreparedStackOperation | None": + if operation.method != "execute": + return None + + kwargs = dict(operation.keyword_arguments) if operation.keyword_arguments else {} + statement_config = kwargs.pop("statement_config", None) + driver = cast("_PipelineDriver", self) + config = statement_config or driver.statement_config + + sql_statement = driver.prepare_statement( + operation.statement, operation.arguments, statement_config=config, kwargs=kwargs + ) + + if sql_statement.is_script or sql_statement.is_many: + return None + + sql_text, prepared_parameters = driver._get_compiled_sql( # pyright: ignore[reportPrivateUsage] + sql_statement, config + ) + return _PreparedStackOperation( + operation_index=index, + operation=operation, + statement=sql_statement, + sql=sql_text, + parameters=prepared_parameters, + ) + + TRANSACTION_STATUS_IDLE = 0 TRANSACTION_STATUS_ACTIVE = 1 TRANSACTION_STATUS_INTRANS = 2 @@ -231,7 +312,7 @@ def _raise_generic_error(self, e: Any, code: "str | None") -> None: raise SQLSpecError(msg) from e -class PsycopgSyncDriver(SyncDriverAdapterBase): +class PsycopgSyncDriver(PsycopgPipelineMixin, SyncDriverAdapterBase): """PostgreSQL psycopg synchronous driver. Provides synchronous database operations for PostgreSQL using psycopg3. 
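Reviewer note: a sketch of the raw psycopg 3 primitives this mixin layers on, assuming psycopg >= 3.2 for the capabilities API; the conninfo string is a placeholder.

    import psycopg

    def pipeline_probe(conninfo: str) -> None:
        # Same gate as _psycopg_pipeline_supported(); requires libpq >= 14.
        if not psycopg.capabilities.has_pipeline():
            return
        with psycopg.connect(conninfo) as conn:
            with conn.pipeline() as pipeline, conn.cursor() as cur_a, conn.cursor() as cur_b:
                # Queries are queued without waiting on individual round trips.
                cur_a.execute("SELECT 1 AS one")
                cur_b.execute("SELECT 2 AS two")
                # Flush and collect pending results, mirroring the
                # pipeline.sync() call in _execute_stack_pipeline below.
                pipeline.sync()
                assert cur_a.fetchall() and cur_b.fetchall()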
@@ -413,6 +494,99 @@ def _execute_script(self, cursor: Any, statement: "SQL") -> "ExecutionResult": last_cursor, statement_count=len(statements), successful_statements=successful_count, is_script_result=True ) + def execute_stack(self, stack: "StatementStack", *, continue_on_error: bool = False) -> "tuple[StackResult, ...]": + """Execute a StatementStack using psycopg pipeline mode when supported.""" + + if ( + not isinstance(stack, StatementStack) + or not stack + or self.stack_native_disabled + or not _psycopg_pipeline_supported() + or continue_on_error + ): + return super().execute_stack(stack, continue_on_error=continue_on_error) + + prepared_ops = self._prepare_pipeline_operations(stack) + if prepared_ops is None: + return super().execute_stack(stack, continue_on_error=continue_on_error) + + return self._execute_stack_pipeline(stack, prepared_ops) + + def _execute_stack_pipeline( + self, stack: "StatementStack", prepared_ops: "list[_PreparedStackOperation]" + ) -> "tuple[StackResult, ...]": + results: list[StackResult] = [] + started_transaction = False + + with StackExecutionObserver(self, stack, continue_on_error=False, native_pipeline=True): + try: + if not self._connection_in_transaction(): + self.begin() + started_transaction = True + + with ExitStack() as resource_stack: + pipeline = resource_stack.enter_context(self.connection.pipeline()) + pending: list[_PipelineCursorEntry] = [] + + for prepared in prepared_ops: + exception_ctx = self.handle_database_exceptions() + resource_stack.enter_context(exception_ctx) + cursor = resource_stack.enter_context(self.with_cursor(self.connection)) + + try: + if prepared.parameters: + cursor.execute(prepared.sql, prepared.parameters) + else: + cursor.execute(prepared.sql) + except Exception as exc: + stack_error = StackExecutionError( + prepared.operation_index, + describe_stack_statement(prepared.operation.statement), + exc, + adapter=type(self).__name__, + mode="fail-fast", + ) + raise stack_error from exc + + pending.append(_PipelineCursorEntry(prepared=prepared, cursor=cursor)) + + pipeline.sync() + + results.extend(self._build_pipeline_stack_result(entry) for entry in pending) + + if started_transaction: + self.commit() + except Exception: + if started_transaction: + try: + self.rollback() + except Exception as rollback_error: # pragma: no cover - diagnostics only + logger.debug("Rollback after psycopg pipeline failure failed: %s", rollback_error) + raise + + return tuple(results) + + def _build_pipeline_stack_result(self, entry: "_PipelineCursorEntry") -> StackResult: + statement = entry.prepared.statement + cursor = entry.cursor + + if statement.returns_rows(): + fetched_data = cursor.fetchall() + column_names = [col.name for col in cursor.description or []] + execution_result = self.create_execution_result( + cursor, + selected_data=fetched_data, + column_names=column_names, + data_row_count=len(fetched_data), + is_select_result=True, + ) + else: + affected_rows = cursor.rowcount if cursor.rowcount and cursor.rowcount > 0 else 0 + execution_result = self.create_execution_result(cursor, rowcount_override=affected_rows) + + sql_result = self.build_statement_result(statement, execution_result) + return StackResult.from_sql_result(sql_result) + def _execute_many(self, cursor: Any, statement: "SQL") -> "ExecutionResult": """Execute SQL with multiple parameter sets. 
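Reviewer note: because the guard in execute_stack above excludes continue_on_error from the pipeline path, partial-failure behavior always comes from the sequential base implementation. A hedged sketch of what callers observe (driver and table are placeholders; the duplicate-key pattern matches the integration tests in this patch):

    from sqlspec.core import StatementStack
    from sqlspec.exceptions import StackExecutionError

    def demo_partial_failure(driver):  # any PsycopgSyncDriver session
        stack = (
            StatementStack()
            .push_execute("INSERT INTO demo_t (id) VALUES (%s)", (1,))
            .push_execute("INSERT INTO demo_t (id) VALUES (%s)", (1,))  # duplicate key -> error entry
            .push_execute("INSERT INTO demo_t (id) VALUES (%s)", (2,))
        )
        results = driver.execute_stack(stack, continue_on_error=True)  # sequential path by design
        assert results[1].is_error()
        assert isinstance(results[1].error, StackExecutionError)
        assert results[0].rowcount == results[2].rowcount == 1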
@@ -671,7 +845,7 @@ def _raise_generic_error(self, e: Any, code: "str | None") -> None: raise SQLSpecError(msg) from e -class PsycopgAsyncDriver(AsyncDriverAdapterBase): +class PsycopgAsyncDriver(PsycopgPipelineMixin, AsyncDriverAdapterBase): """PostgreSQL psycopg asynchronous driver. Provides asynchronous database operations for PostgreSQL using psycopg3. @@ -711,10 +885,10 @@ def with_cursor(self, connection: "PsycopgAsyncConnection") -> "PsycopgAsyncCurs async def begin(self) -> None: """Begin a database transaction on the current connection.""" try: - if hasattr(self.connection, "autocommit") and not self.connection.autocommit: - pass - else: - self.connection.autocommit = False + autocommit_flag = getattr(self.connection, "autocommit", None) + if isinstance(autocommit_flag, bool) and not autocommit_flag: + return + await self.connection.set_autocommit(False) except Exception as e: msg = f"Failed to begin transaction: {e}" raise SQLSpecError(msg) from e @@ -855,6 +1029,101 @@ async def _execute_script(self, cursor: Any, statement: "SQL") -> "ExecutionResu last_cursor, statement_count=len(statements), successful_statements=successful_count, is_script_result=True ) + async def execute_stack( + self, stack: "StatementStack", *, continue_on_error: bool = False + ) -> "tuple[StackResult, ...]": + """Execute a StatementStack using psycopg async pipeline when supported.""" + + if ( + not isinstance(stack, StatementStack) + or not stack + or self.stack_native_disabled + or not _psycopg_pipeline_supported() + or continue_on_error + ): + return await super().execute_stack(stack, continue_on_error=continue_on_error) + + prepared_ops = self._prepare_pipeline_operations(stack) + if prepared_ops is None: + return await super().execute_stack(stack, continue_on_error=continue_on_error) + + return await self._execute_stack_pipeline(stack, prepared_ops) + + async def _execute_stack_pipeline( + self, stack: "StatementStack", prepared_ops: "list[_PreparedStackOperation]" + ) -> "tuple[StackResult, ...]": + results: list[StackResult] = [] + started_transaction = False + + with StackExecutionObserver(self, stack, continue_on_error=False, native_pipeline=True): + try: + if not self._connection_in_transaction(): + await self.begin() + started_transaction = True + + async with AsyncExitStack() as resource_stack: + pipeline = await resource_stack.enter_async_context(self.connection.pipeline()) + pending: list[_PipelineCursorEntry] = [] + + for prepared in prepared_ops: + exception_ctx = self.handle_database_exceptions() + await resource_stack.enter_async_context(exception_ctx) + cursor = await resource_stack.enter_async_context(self.with_cursor(self.connection)) + + try: + if prepared.parameters: + await cursor.execute(prepared.sql, prepared.parameters) + else: + await cursor.execute(prepared.sql) + except Exception as exc: + stack_error = StackExecutionError( + prepared.operation_index, + describe_stack_statement(prepared.operation.statement), + exc, + adapter=type(self).__name__, + mode="fail-fast", + ) + raise stack_error from exc + + pending.append(_PipelineCursorEntry(prepared=prepared, cursor=cursor)) + + await pipeline.sync() + + results.extend([await self._build_pipeline_stack_result_async(entry) for entry in pending]) + + if started_transaction: + await self.commit() + except Exception: + if started_transaction: + try: + await self.rollback() + except Exception as rollback_error: # pragma: no cover - diagnostics only + logger.debug("Rollback after psycopg pipeline failure failed: %s", rollback_error) + 
raise + + return tuple(results) + + async def _build_pipeline_stack_result_async(self, entry: "_PipelineCursorEntry") -> StackResult: + statement = entry.prepared.statement + cursor = entry.cursor + + if statement.returns_rows(): + fetched_data = await cursor.fetchall() + column_names = [col.name for col in cursor.description or []] + execution_result = self.create_execution_result( + cursor, + selected_data=fetched_data, + column_names=column_names, + data_row_count=len(fetched_data), + is_select_result=True, + ) + else: + affected_rows = cursor.rowcount if cursor.rowcount and cursor.rowcount > 0 else 0 + execution_result = self.create_execution_result(cursor, rowcount_override=affected_rows) + + sql_result = self.build_statement_result(statement, execution_result) + return StackResult.from_sql_result(sql_result) + async def _execute_many(self, cursor: Any, statement: "SQL") -> "ExecutionResult": """Execute SQL with multiple parameter sets (async). diff --git a/sqlspec/config.py b/sqlspec/config.py index f7d2447ec..0a2534a21 100644 --- a/sqlspec/config.py +++ b/sqlspec/config.py @@ -8,7 +8,7 @@ from sqlspec.core import ParameterStyle, ParameterStyleConfig, StatementConfig from sqlspec.exceptions import MissingDependencyError -from sqlspec.migrations.tracker import AsyncMigrationTracker, SyncMigrationTracker +from sqlspec.migrations import AsyncMigrationTracker, SyncMigrationTracker from sqlspec.observability import ObservabilityConfig from sqlspec.utils.logging import get_logger from sqlspec.utils.module_loader import ensure_pyarrow @@ -1208,6 +1208,8 @@ def __init__( self.driver_features = driver_features or {} self._storage_capabilities = None self.driver_features.setdefault("storage_capabilities", self.storage_capabilities()) + self._promote_driver_feature_hooks() + self._configure_observability_extensions() def create_pool(self) -> PoolT: """Create and return the connection pool. @@ -1381,6 +1383,8 @@ def __init__( self.driver_features = driver_features or {} self._storage_capabilities = None self.driver_features.setdefault("storage_capabilities", self.storage_capabilities()) + self._promote_driver_feature_hooks() + self._configure_observability_extensions() async def create_pool(self) -> PoolT: """Create and return the connection pool. 
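Reviewer note: driver_features flows from these config constructors down to the drivers, so native stack execution can be disabled per config through the stack_native_disabled flag read in driver/_common.py. A sketch of the opt-out; the exact constructor shape and conninfo are assumptions, only the driver_features key comes from this patch.

    from sqlspec.adapters.psycopg import PsycopgSyncConfig

    config = PsycopgSyncConfig(
        pool_config={"conninfo": "dbname=example"},  # placeholder connection settings
        driver_features={"stack_native_disabled": True},  # force the sequential execute_stack path
    )

Drivers surface the flag via the stack_native_disabled property, and the forced_disable label in StackExecutionMetrics records when the override fires.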
diff --git a/sqlspec/core/__init__.py b/sqlspec/core/__init__.py index be7ff403a..215fc1001 100644 --- a/sqlspec/core/__init__.py +++ b/sqlspec/core/__init__.py @@ -143,6 +143,7 @@ hash_parameters, hash_sql_statement, ) +from sqlspec.core.metrics import StackExecutionMetrics from sqlspec.core.parameters import ( DRIVER_PARAMETER_PROFILES, EXECUTE_MANY_MIN_ROWS, @@ -171,9 +172,16 @@ validate_parameter_alignment, wrap_with_type, ) -from sqlspec.core.result import ArrowResult, SQLResult, StackResult, StatementResult, create_arrow_result, create_sql_result -from sqlspec.core.stack import StackOperation, StatementStack +from sqlspec.core.result import ( + ArrowResult, + SQLResult, + StackResult, + StatementResult, + create_arrow_result, + create_sql_result, +) from sqlspec.core.splitter import split_sql_script +from sqlspec.core.stack import StackOperation, StatementStack from sqlspec.core.statement import ( SQL, ProcessedState, @@ -193,6 +201,7 @@ format_datetime_rfc3339, parse_datetime_rfc3339, ) +from sqlspec.exceptions import StackExecutionError __all__ = ( "DRIVER_PARAMETER_PROFILES", @@ -231,14 +240,16 @@ "ProcessedState", "SQLProcessor", "SQLResult", + "SearchFilter", + "StackExecutionError", + "StackExecutionMetrics", "StackOperation", "StackResult", - "StatementStack", - "SearchFilter", "Statement", "StatementConfig", "StatementFilter", "StatementResult", + "StatementStack", "TypedParameter", "UnifiedCache", "apply_filter", diff --git a/sqlspec/core/metrics.py b/sqlspec/core/metrics.py new file mode 100644 index 000000000..f28df3965 --- /dev/null +++ b/sqlspec/core/metrics.py @@ -0,0 +1,83 @@ +"""Telemetry helper objects for stack execution.""" + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: # pragma: no cover - imported for typing only + from sqlspec.observability import ObservabilityRuntime + +__all__ = ("StackExecutionMetrics",) + + +class StackExecutionMetrics: + """Capture telemetry facts about a stack execution.""" + + __slots__ = ( + "adapter", + "continue_on_error", + "duration_s", + "error_count", + "error_type", + "forced_disable", + "native_pipeline", + "statement_count", + ) + + def __init__( + self, + adapter: str, + statement_count: int, + *, + continue_on_error: bool, + native_pipeline: bool, + forced_disable: bool, + ) -> None: + self.adapter = adapter + self.statement_count = statement_count + self.continue_on_error = continue_on_error + self.native_pipeline = native_pipeline + self.forced_disable = forced_disable + self.duration_s = 0.0 + self.error_type: str | None = None + self.error_count = 0 + + def record_duration(self, duration: float) -> None: + """Record execution duration in seconds.""" + + self.duration_s = duration + + def record_operation_error(self, error: Exception) -> None: + """Record an operation error when continue-on-error is enabled.""" + + self.error_count += 1 + if not self.continue_on_error and self.error_type is None: + self.error_type = type(error).__name__ + + def record_error(self, error: Exception) -> None: + """Record a terminal error.""" + + self.error_type = type(error).__name__ + self.error_count = max(self.error_count, 1) + + def emit(self, runtime: "ObservabilityRuntime") -> None: + """Emit collected metrics to the configured runtime.""" + + runtime.increment_metric("stack.execute.invocations") + runtime.increment_metric("stack.execute.statements", float(self.statement_count)) + + mode = "continue" if self.continue_on_error else "failfast" + runtime.increment_metric(f"stack.execute.mode.{mode}") + + pipeline_label = "native" 
if self.native_pipeline else "sequential" + runtime.increment_metric(f"stack.execute.path.{pipeline_label}") + + if self.forced_disable: + runtime.increment_metric("stack.execute.override.forced") + + runtime.increment_metric("stack.execute.duration_ms", self.duration_s * 1000.0) + + if self.error_type is not None: + runtime.increment_metric("stack.execute.errors") + runtime.increment_metric(f"stack.execute.errors.{self.error_type}") + + if self.error_count and self.continue_on_error: + runtime.increment_metric("stack.execute.partial_errors", float(self.error_count)) diff --git a/sqlspec/core/result.py b/sqlspec/core/result.py index 8566bd1de..597303382 100644 --- a/sqlspec/core/result.py +++ b/sqlspec/core/result.py @@ -10,6 +10,7 @@ """ from abc import ABC, abstractmethod +from collections.abc import Iterable, Iterator from typing import TYPE_CHECKING, Any, Optional, cast, overload from mypy_extensions import mypyc_attr @@ -27,8 +28,6 @@ from sqlspec.utils.schema import to_schema if TYPE_CHECKING: - from collections.abc import Iterator - from sqlspec.core.statement import SQL from sqlspec.typing import ArrowTable, PandasDataFrame, PolarsDataFrame, SchemaT @@ -39,7 +38,7 @@ @mypyc_attr(allow_interpreted_subclasses=False) -class StatementResult(ABC): +class StatementResult(ABC, Iterable[Any]): """Abstract base class for SQL statement execution results. Provides a common interface for handling different types of SQL operation @@ -83,6 +82,10 @@ def __init__( self.execution_time = execution_time self.metadata = metadata if metadata is not None else {} + @abstractmethod + def __iter__(self) -> Iterator[Any]: + """Iterate over result rows.""" + @abstractmethod def is_success(self) -> bool: """Check if the operation was successful. diff --git a/sqlspec/core/stack.py b/sqlspec/core/stack.py index 0fc76dbd9..45217bf0f 100644 --- a/sqlspec/core/stack.py +++ b/sqlspec/core/stack.py @@ -6,7 +6,8 @@ if TYPE_CHECKING: # pragma: no cover from sqlspec.builder import QueryBuilder - from sqlspec.core.statement import Statement, StatementConfig, StatementFilter + from sqlspec.core.filters import StatementFilter + from sqlspec.core.statement import Statement, StatementConfig from sqlspec.typing import StatementParameters __all__ = ("StackOperation", "StatementStack") diff --git a/sqlspec/driver/__init__.py b/sqlspec/driver/__init__.py index ce6ea61bc..4bc2e18e0 100644 --- a/sqlspec/driver/__init__.py +++ b/sqlspec/driver/__init__.py @@ -2,7 +2,13 @@ from sqlspec.driver import mixins from sqlspec.driver._async import AsyncDataDictionaryBase, AsyncDriverAdapterBase -from sqlspec.driver._common import CommonDriverAttributesMixin, ExecutionResult, VersionInfo +from sqlspec.driver._common import ( + CommonDriverAttributesMixin, + ExecutionResult, + StackExecutionObserver, + VersionInfo, + describe_stack_statement, +) from sqlspec.driver._sync import SyncDataDictionaryBase, SyncDriverAdapterBase __all__ = ( @@ -11,9 +17,11 @@ "CommonDriverAttributesMixin", "DriverAdapterProtocol", "ExecutionResult", + "StackExecutionObserver", "SyncDataDictionaryBase", "SyncDriverAdapterBase", "VersionInfo", + "describe_stack_statement", "mixins", ) diff --git a/sqlspec/driver/_async.py b/sqlspec/driver/_async.py index d389b7746..2620c64ab 100644 --- a/sqlspec/driver/_async.py +++ b/sqlspec/driver/_async.py @@ -10,6 +10,7 @@ CommonDriverAttributesMixin, DataDictionaryMixin, ExecutionResult, + StackExecutionObserver, VersionInfo, describe_stack_statement, handle_single_row_error, @@ -202,62 +203,76 @@ async def execute_stack( 
raise ValueError(msg) results: list[StackResult] = [] - started_transaction = False single_transaction = not continue_on_error - mode_label = "continue-on-error" if continue_on_error else "fail-fast" - logger.debug( - "Executing statement stack: driver=%s size=%s mode=%s in_tx=%s", - type(self).__name__, - len(stack.operations), - mode_label, - self._connection_in_transaction(), - ) - try: - if single_transaction and not self._connection_in_transaction(): - await self.begin() - started_transaction = True - - for index, operation in enumerate(stack.operations): - try: - raw_result = await self._execute_stack_operation(operation) - except Exception as exc: # pragma: no cover - exercised via tests - stack_error = StackExecutionError( - index, - describe_stack_statement(operation.statement), - exc, - adapter=type(self).__name__, - mode="continue-on-error" if continue_on_error else "fail-fast", - ) - - if started_transaction and not continue_on_error: - try: - await self.rollback() - except Exception as rollback_error: # pragma: no cover - diagnostics only - logger.debug("Rollback after stack failure failed: %s", rollback_error) - started_transaction = False + with StackExecutionObserver(self, stack, continue_on_error, native_pipeline=False) as observer: + started_transaction = False + + try: + if single_transaction and not self._connection_in_transaction(): + await self.begin() + started_transaction = True + + for index, operation in enumerate(stack.operations): + try: + raw_result = await self._execute_stack_operation(operation) + except Exception as exc: # pragma: no cover - exercised via tests + stack_error = StackExecutionError( + index, + describe_stack_statement(operation.statement), + exc, + adapter=type(self).__name__, + mode="continue-on-error" if continue_on_error else "fail-fast", + ) + + if started_transaction and not continue_on_error: + try: + await self.rollback() + except Exception as rollback_error: # pragma: no cover - diagnostics only + logger.debug("Rollback after stack failure failed: %s", rollback_error) + started_transaction = False + + if continue_on_error: + await self._rollback_after_stack_error_async() + observer.record_operation_error(stack_error) + results.append(StackResult.from_error(stack_error)) + continue + + raise stack_error from exc + + results.append(StackResult(raw_result=raw_result)) if continue_on_error: - logger.debug("Stack operation %s failed in continue-on-error mode: %s", index, exc) - results.append(StackResult.from_error(stack_error)) - continue - - raise stack_error from exc - - results.append(StackResult(raw_result=raw_result)) - - if started_transaction: - await self.commit() - except Exception: - if started_transaction: - try: - await self.rollback() - except Exception as rollback_error: # pragma: no cover - diagnostics only - logger.debug("Rollback after stack failure failed: %s", rollback_error) - raise + await self._commit_after_stack_operation_async() + + if started_transaction: + await self.commit() + except Exception: + if started_transaction: + try: + await self.rollback() + except Exception as rollback_error: # pragma: no cover - diagnostics only + logger.debug("Rollback after stack failure failed: %s", rollback_error) + raise return tuple(results) + async def _rollback_after_stack_error_async(self) -> None: + """Attempt to rollback after a stack operation error (async).""" + + try: + await self.rollback() + except Exception as rollback_error: # pragma: no cover - driver-specific cleanup + logger.debug("Rollback after stack error 
failed: %s", rollback_error) + + async def _commit_after_stack_operation_async(self) -> None: + """Attempt to commit after a successful stack operation when not batching (async).""" + + try: + await self.commit() + except Exception as commit_error: # pragma: no cover - driver-specific cleanup + logger.debug("Commit after stack operation failed: %s", commit_error) + @abstractmethod async def _execute_many(self, cursor: Any, statement: "SQL") -> ExecutionResult: """Execute SQL with multiple parameter sets (executemany). diff --git a/sqlspec/driver/_common.py b/sqlspec/driver/_common.py index ef145c04e..0cb2e7b91 100644 --- a/sqlspec/driver/_common.py +++ b/sqlspec/driver/_common.py @@ -1,8 +1,11 @@ """Common driver attributes and utilities.""" +import hashlib +import logging import re from contextlib import suppress -from typing import TYPE_CHECKING, Any, Final, NamedTuple, NoReturn, Optional, TypeVar, cast +from time import perf_counter +from typing import TYPE_CHECKING, Any, Final, Literal, NamedTuple, NoReturn, Optional, TypeVar, cast from mypy_extensions import trait from sqlglot import exp @@ -20,14 +23,16 @@ get_cache_config, split_sql_script, ) +from sqlspec.core.metrics import StackExecutionMetrics from sqlspec.exceptions import ImproperConfigurationError, NotFoundError -from sqlspec.utils.logging import get_logger +from sqlspec.utils.logging import get_logger, log_with_context from sqlspec.utils.type_guards import is_statement_filter if TYPE_CHECKING: from collections.abc import Sequence from sqlspec.core import FilterTypeT, StatementFilter + from sqlspec.core.stack import StatementStack from sqlspec.observability import ObservabilityRuntime from sqlspec.typing import StatementParameters @@ -41,9 +46,11 @@ "DataDictionaryMixin", "ExecutionResult", "ScriptExecutionResult", + "StackExecutionObserver", "VersionInfo", "describe_stack_statement", "handle_single_row_error", + "hash_stack_operations", "make_cache_key_hashable", ) @@ -108,6 +115,108 @@ def make_cache_key_hashable(obj: Any) -> Any: return obj +def hash_stack_operations(stack: "StatementStack") -> "tuple[str, ...]": + """Return SHA256 fingerprints for statements contained in the stack.""" + + hashes: list[str] = [] + for operation in stack.operations: + summary = describe_stack_statement(operation.statement) + if not isinstance(summary, str): + summary = str(summary) + digest = hashlib.sha256(summary.encode("utf-8")).hexdigest() + hashes.append(digest[:16]) + return tuple(hashes) + + +class StackExecutionObserver: + """Context manager that aggregates telemetry for stack execution.""" + + __slots__ = ( + "continue_on_error", + "driver", + "hashed_operations", + "metrics", + "native_pipeline", + "runtime", + "span", + "stack", + "started", + ) + + def __init__( + self, + driver: "CommonDriverAttributesMixin", + stack: "StatementStack", + continue_on_error: bool, + native_pipeline: bool, + ) -> None: + self.driver = driver + self.stack = stack + self.continue_on_error = continue_on_error + self.native_pipeline = native_pipeline + self.runtime = driver.observability + self.metrics = StackExecutionMetrics( + adapter=type(driver).__name__, + statement_count=len(stack.operations), + continue_on_error=continue_on_error, + native_pipeline=native_pipeline, + forced_disable=driver.stack_native_disabled, + ) + self.hashed_operations = hash_stack_operations(stack) + self.span: Any | None = None + self.started = 0.0 + + def __enter__(self) -> "StackExecutionObserver": + self.started = perf_counter() + attributes = { + 
"sqlspec.stack.statement_count": len(self.stack.operations), + "sqlspec.stack.continue_on_error": self.continue_on_error, + "sqlspec.stack.native_pipeline": self.native_pipeline, + "sqlspec.stack.forced_disable": self.driver.stack_native_disabled, + } + self.span = self.runtime.start_span("sqlspec.stack.execute", attributes=attributes) + log_with_context( + logger, + logging.DEBUG, + "stack.execute.start", + driver=type(self.driver).__name__, + stack_size=len(self.stack.operations), + continue_on_error=self.continue_on_error, + native_pipeline=self.native_pipeline, + forced_disable=self.driver.stack_native_disabled, + hashed_operations=self.hashed_operations, + ) + return self + + def __exit__(self, exc_type: Any, exc: Exception | None, exc_tb: Any) -> Literal[False]: + duration = perf_counter() - self.started + self.metrics.record_duration(duration) + if exc is not None: + self.metrics.record_error(exc) + self.runtime.span_manager.end_span(self.span, error=exc if exc is not None else None) + self.metrics.emit(self.runtime) + level = logging.ERROR if exc is not None else logging.DEBUG + log_with_context( + logger, + level, + "stack.execute.failed" if exc is not None else "stack.execute.complete", + driver=type(self.driver).__name__, + stack_size=len(self.stack.operations), + continue_on_error=self.continue_on_error, + native_pipeline=self.native_pipeline, + forced_disable=self.driver.stack_native_disabled, + hashed_operations=self.hashed_operations, + duration_s=duration, + error_type=type(exc).__name__ if exc is not None else None, + ) + return False + + def record_operation_error(self, error: Exception) -> None: + """Record an operation error when continue-on-error is enabled.""" + + self.metrics.record_operation_error(error) + + def describe_stack_statement(statement: Any) -> str: """Return a readable representation of a stack statement for diagnostics.""" @@ -343,6 +452,12 @@ def observability(self) -> "ObservabilityRuntime": self._observability = ObservabilityRuntime(config_name=type(self).__name__) return self._observability + @property + def stack_native_disabled(self) -> bool: + """Return True when native stack execution is disabled for this driver.""" + + return bool(self.driver_features.get("stack_native_disabled", False)) + def create_execution_result( self, cursor_result: Any, diff --git a/sqlspec/driver/_sync.py b/sqlspec/driver/_sync.py index 9e482e557..015ccdebf 100644 --- a/sqlspec/driver/_sync.py +++ b/sqlspec/driver/_sync.py @@ -10,6 +10,7 @@ CommonDriverAttributesMixin, DataDictionaryMixin, ExecutionResult, + StackExecutionObserver, VersionInfo, describe_stack_statement, handle_single_row_error, @@ -189,12 +190,7 @@ def _execute_script(self, cursor: Any, statement: "SQL") -> ExecutionResult: cursor, statement_count=statement_count, successful_statements=successful_count, is_script_result=True ) - def execute_stack( - self, - stack: "StatementStack", - *, - continue_on_error: bool = False, - ) -> "tuple[StackResult, ...]": + def execute_stack(self, stack: "StatementStack", *, continue_on_error: bool = False) -> "tuple[StackResult, ...]": """Execute a StatementStack sequentially using the adapter's primitives.""" if not isinstance(stack, StatementStack): @@ -205,66 +201,76 @@ def execute_stack( raise ValueError(msg) results: list[StackResult] = [] - started_transaction = False single_transaction = not continue_on_error - mode_label = "continue-on-error" if continue_on_error else "fail-fast" - logger.debug( - "Executing statement stack: driver=%s size=%s mode=%s in_tx=%s", 
- type(self).__name__, - len(stack.operations), - mode_label, - self._connection_in_transaction(), - ) - try: - if single_transaction and not self._connection_in_transaction(): - self.begin() - started_transaction = True - - for index, operation in enumerate(stack.operations): - try: - raw_result = self._execute_stack_operation(operation) - except Exception as exc: # pragma: no cover - exercised via tests - stack_error = StackExecutionError( - index, - describe_stack_statement(operation.statement), - exc, - adapter=type(self).__name__, - mode="continue-on-error" if continue_on_error else "fail-fast", - ) - - if started_transaction and not continue_on_error: - try: - self.rollback() - except Exception as rollback_error: # pragma: no cover - diagnostics only - logger.debug("Rollback after stack failure failed: %s", rollback_error) - started_transaction = False + with StackExecutionObserver(self, stack, continue_on_error, native_pipeline=False) as observer: + started_transaction = False - if continue_on_error: - logger.debug( - "Stack operation %s failed in continue-on-error mode: %s", + try: + if single_transaction and not self._connection_in_transaction(): + self.begin() + started_transaction = True + + for index, operation in enumerate(stack.operations): + try: + raw_result = self._execute_stack_operation(operation) + except Exception as exc: # pragma: no cover - exercised via tests + stack_error = StackExecutionError( index, + describe_stack_statement(operation.statement), exc, + adapter=type(self).__name__, + mode="continue-on-error" if continue_on_error else "fail-fast", ) - results.append(StackResult.from_error(stack_error)) - continue - raise stack_error from exc + if started_transaction and not continue_on_error: + try: + self.rollback() + except Exception as rollback_error: # pragma: no cover - diagnostics only + logger.debug("Rollback after stack failure failed: %s", rollback_error) + started_transaction = False - results.append(StackResult(raw_result=raw_result)) + if continue_on_error: + self._rollback_after_stack_error() + observer.record_operation_error(stack_error) + results.append(StackResult.from_error(stack_error)) + continue - if started_transaction: - self.commit() - except Exception: - if started_transaction: - try: - self.rollback() - except Exception as rollback_error: # pragma: no cover - diagnostics only - logger.debug("Rollback after stack failure failed: %s", rollback_error) - raise + raise stack_error from exc + + results.append(StackResult(raw_result=raw_result)) + + if continue_on_error: + self._commit_after_stack_operation() + + if started_transaction: + self.commit() + except Exception: + if started_transaction: + try: + self.rollback() + except Exception as rollback_error: # pragma: no cover - diagnostics only + logger.debug("Rollback after stack failure failed: %s", rollback_error) + raise return tuple(results) + def _rollback_after_stack_error(self) -> None: + """Attempt to rollback after a stack operation error to clear connection state.""" + + try: + self.rollback() + except Exception as rollback_error: # pragma: no cover - driver-specific cleanup + logger.debug("Rollback after stack error failed: %s", rollback_error) + + def _commit_after_stack_operation(self) -> None: + """Attempt to commit after a successful stack operation when not batching.""" + + try: + self.commit() + except Exception as commit_error: # pragma: no cover - driver-specific cleanup + logger.debug("Commit after stack operation failed: %s", commit_error) + @abstractmethod def 
_execute_many(self, cursor: Any, statement: "SQL") -> ExecutionResult: """Execute SQL with multiple parameter sets (executemany). diff --git a/sqlspec/exceptions.py b/sqlspec/exceptions.py index 753b7850c..dbfba9d16 100644 --- a/sqlspec/exceptions.py +++ b/sqlspec/exceptions.py @@ -1,6 +1,8 @@ from collections.abc import Generator from contextlib import contextmanager -from typing import Any +from typing import Any, Final + +STACK_SQL_PREVIEW_LIMIT: Final[int] = 120 __all__ = ( "CheckViolationError", @@ -27,8 +29,8 @@ "SQLFileParseError", "SQLParsingError", "SQLSpecError", - "StackExecutionError", "SerializationError", + "StackExecutionError", "StorageCapabilityError", "StorageOperationFailedError", "TransactionError", @@ -188,8 +190,8 @@ def __init__( pipeline_state = "enabled" if native_pipeline else "disabled" adapter_label = adapter or "unknown-adapter" preview = " ".join(sql.strip().split()) - if len(preview) > 120: - preview = f"{preview[:117]}..." + if len(preview) > STACK_SQL_PREVIEW_LIMIT: + preview = f"{preview[: STACK_SQL_PREVIEW_LIMIT - 3]}..." detail = ( f"Stack operation {operation_index} failed on {adapter_label} " f"(mode={mode}, pipeline={pipeline_state}) sql={preview}" diff --git a/sqlspec/protocols.py b/sqlspec/protocols.py index 0b5886f35..f2a7971b5 100644 --- a/sqlspec/protocols.py +++ b/sqlspec/protocols.py @@ -4,7 +4,8 @@ and runtime isinstance() checks. """ -from typing import TYPE_CHECKING, Any, Mapping, Protocol, Sequence, runtime_checkable +from collections.abc import Mapping, Sequence +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable from typing_extensions import Self @@ -481,6 +482,8 @@ def select_to_arrow( ArrowResult containing Arrow data. """ ... + + @runtime_checkable class StackResultProtocol(Protocol): """Protocol describing stack execution results.""" diff --git a/tests/integration/test_adapters/test_adbc/test_adbc_driver.py b/tests/integration/test_adapters/test_adbc/test_adbc_driver.py index d670be664..a7a4f6401 100644 --- a/tests/integration/test_adapters/test_adbc/test_adbc_driver.py +++ b/tests/integration/test_adapters/test_adbc/test_adbc_driver.py @@ -5,7 +5,7 @@ import pytest from sqlspec.adapters.adbc import AdbcDriver -from sqlspec.core import SQLResult +from sqlspec.core import SQLResult, StatementStack from tests.integration.test_adapters.test_adbc.conftest import xfail_if_driver_missing ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] @@ -200,6 +200,52 @@ def test_adbc_postgresql_execute_script(adbc_postgresql_session: AdbcDriver) -> assert select_result.data[1]["value"] == 888 +@pytest.mark.xdist_group("postgres") +@pytest.mark.adbc +def test_adbc_postgresql_statement_stack_sequential(adbc_postgresql_session: AdbcDriver) -> None: + """ADBC PostgreSQL should keep StatementStack execution sequential.""" + + adbc_postgresql_session.execute("TRUNCATE TABLE test_table") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES ($1, $2, $3)", (1, "adbc-stack-one", 10)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES ($1, $2, $3)", (2, "adbc-stack-two", 20)) + .push_execute("SELECT COUNT(*) AS total FROM test_table WHERE name LIKE $1", ("adbc-stack-%",)) + ) + + results = adbc_postgresql_session.execute_stack(stack) + + assert len(results) == 3 + assert results[2].raw_result is not None + assert results[2].raw_result.data is not None + assert results[2].raw_result.data[0]["total"] == 2 + + +@pytest.mark.xdist_group("postgres") +@pytest.mark.adbc +def 
test_adbc_postgresql_statement_stack_continue_on_error(adbc_postgresql_session: AdbcDriver) -> None: + """continue_on_error should surface failures but execute remaining operations.""" + + adbc_postgresql_session.execute("TRUNCATE TABLE test_table") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES ($1, $2, $3)", (1, "adbc-initial", 5)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES ($1, $2, $3)", (1, "adbc-duplicate", 15)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES ($1, $2, $3)", (2, "adbc-final", 25)) + ) + + results = adbc_postgresql_session.execute_stack(stack, continue_on_error=True) + + assert len(results) == 3 + assert results[1].error is not None + + verify = adbc_postgresql_session.execute("SELECT COUNT(*) AS total FROM test_table") + assert verify.data is not None + assert verify.data[0]["total"] == 2 + + @pytest.mark.xdist_group("postgres") @pytest.mark.adbc def test_adbc_postgresql_result_methods(adbc_postgresql_session: AdbcDriver) -> None: diff --git a/tests/integration/test_adapters/test_aiosqlite/test_driver.py b/tests/integration/test_adapters/test_aiosqlite/test_driver.py index cee20e9e6..300162342 100644 --- a/tests/integration/test_adapters/test_aiosqlite/test_driver.py +++ b/tests/integration/test_adapters/test_aiosqlite/test_driver.py @@ -8,7 +8,7 @@ import pytest from sqlspec.adapters.aiosqlite import AiosqliteDriver -from sqlspec.core import SQL, SQLResult +from sqlspec.core import SQL, SQLResult, StatementStack pytestmark = pytest.mark.xdist_group("sqlite") ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] @@ -209,6 +209,54 @@ async def test_aiosqlite_data_types(aiosqlite_session: AiosqliteDriver) -> None: await aiosqlite_session.execute_script("DROP TABLE aiosqlite_data_types_test") +async def test_aiosqlite_statement_stack_sequential(aiosqlite_session: AiosqliteDriver) -> None: + """StatementStack execution should remain sequential for aiosqlite.""" + + await aiosqlite_session.execute("DELETE FROM test_table") + await aiosqlite_session.commit() + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (1, "aiosqlite-stack-one", 100)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (2, "aiosqlite-stack-two", 200)) + .push_execute("SELECT COUNT(*) AS total FROM test_table WHERE name LIKE ?", ("aiosqlite-stack-%",)) + ) + + results = await aiosqlite_session.execute_stack(stack) + + assert len(results) == 3 + assert results[0].rowcount == 1 + assert results[1].rowcount == 1 + assert results[2].raw_result is not None + assert results[2].raw_result.data is not None + assert results[2].raw_result.data[0]["total"] == 2 + + +async def test_aiosqlite_statement_stack_continue_on_error(aiosqlite_session: AiosqliteDriver) -> None: + """Sequential execution should continue when continue_on_error is enabled.""" + + await aiosqlite_session.execute("DELETE FROM test_table") + await aiosqlite_session.commit() + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (1, "aiosqlite-initial", 5)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (1, "aiosqlite-duplicate", 15)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (2, "aiosqlite-final", 25)) + ) + + results = await aiosqlite_session.execute_stack(stack, continue_on_error=True) + + assert len(results) == 3 + assert results[0].rowcount == 
1 + assert results[1].error is not None + assert results[2].rowcount == 1 + + verify = await aiosqlite_session.execute("SELECT COUNT(*) AS total FROM test_table") + assert verify.data is not None + assert verify.data[0]["total"] == 2 + + async def test_aiosqlite_transactions(aiosqlite_session: AiosqliteDriver) -> None: """Test transaction behavior.""" diff --git a/tests/integration/test_adapters/test_asyncmy/test_driver.py b/tests/integration/test_adapters/test_asyncmy/test_driver.py index d79ba0807..72ff1a1c3 100644 --- a/tests/integration/test_adapters/test_asyncmy/test_driver.py +++ b/tests/integration/test_adapters/test_asyncmy/test_driver.py @@ -12,7 +12,7 @@ from pytest_databases.docker.mysql import MySQLService from sqlspec.adapters.asyncmy import AsyncmyConfig, AsyncmyDriver -from sqlspec.core import SQL, SQLResult +from sqlspec.core import SQL, SQLResult, StatementStack from sqlspec.utils.serializers import from_json, to_json ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] @@ -163,6 +163,53 @@ async def test_asyncmy_data_types(asyncmy_driver: AsyncmyDriver) -> None: assert row["json_col"]["key"] == "value" +async def test_asyncmy_statement_stack_sequential(asyncmy_driver: AsyncmyDriver) -> None: + """StatementStack should execute sequentially for asyncmy (no native batching).""" + + await asyncmy_driver.execute_script("TRUNCATE TABLE test_table") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (1, "mysql-stack-one", 11)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (2, "mysql-stack-two", 22)) + .push_execute("SELECT COUNT(*) AS total FROM test_table WHERE name LIKE ?", ("mysql-stack-%",)) + ) + + results = await asyncmy_driver.execute_stack(stack) + + assert len(results) == 3 + assert results[0].rowcount == 1 + assert results[1].rowcount == 1 + final_result = results[2].raw_result + assert isinstance(final_result, SQLResult) + data = final_result.get_data() + assert data + assert data[0]["total"] == 2 + + +async def test_asyncmy_statement_stack_continue_on_error(asyncmy_driver: AsyncmyDriver) -> None: + """Continue-on-error should still work with sequential fallback.""" + + await asyncmy_driver.execute_script("TRUNCATE TABLE test_table") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (1, "mysql-initial", 5)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (1, "mysql-duplicate", 15)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (2, "mysql-final", 25)) + ) + + results = await asyncmy_driver.execute_stack(stack, continue_on_error=True) + + assert len(results) == 3 + assert results[0].rowcount == 1 + assert results[1].error is not None + assert results[2].rowcount == 1 + + verify = await asyncmy_driver.execute("SELECT COUNT(*) AS total FROM test_table WHERE name LIKE ?", ("mysql-%",)) + assert verify.get_data()[0]["total"] == 2 + + async def test_asyncmy_driver_features_custom_serializers(mysql_service: MySQLService) -> None: """Ensure custom serializer and deserializer driver features are applied.""" diff --git a/tests/integration/test_adapters/test_asyncpg/test_driver.py b/tests/integration/test_adapters/test_asyncpg/test_driver.py index 4151383c9..98c920695 100644 --- a/tests/integration/test_adapters/test_asyncpg/test_driver.py +++ b/tests/integration/test_adapters/test_asyncpg/test_driver.py @@ -7,7 +7,7 @@ from pytest_databases.docker.postgres 
import PostgresService from sqlspec.adapters.asyncpg import AsyncpgConfig, AsyncpgDriver -from sqlspec.core import SQLResult +from sqlspec.core import SQLResult, StatementStack ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] @@ -818,3 +818,68 @@ async def test_for_update_of_tables(asyncpg_session: AsyncpgDriver) -> None: raise finally: await asyncpg_session.execute_script("DROP TABLE IF EXISTS test_users") + + +async def test_asyncpg_statement_stack_batch(asyncpg_session: AsyncpgDriver) -> None: + """Ensure StatementStack batches operations under asyncpg native path.""" + + await asyncpg_session.execute_script("TRUNCATE TABLE test_table RESTART IDENTITY") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES ($1, $2, $3)", (1, "stack-one", 10)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES ($1, $2, $3)", (2, "stack-two", 20)) + .push_execute("SELECT COUNT(*) AS total_rows FROM test_table WHERE name LIKE $1", ("stack-%",)) + ) + + results = await asyncpg_session.execute_stack(stack) + + assert len(results) == 3 + assert results[0].rowcount == 1 + assert results[1].rowcount == 1 + assert results[2].raw_result is not None + assert results[2].raw_result.data is not None + assert results[2].raw_result.data[0]["total_rows"] == 2 + + +async def test_asyncpg_statement_stack_continue_on_error(asyncpg_session: AsyncpgDriver) -> None: + """Stack execution should surface errors while continuing operations when requested.""" + + await asyncpg_session.execute_script("TRUNCATE TABLE test_table RESTART IDENTITY") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES ($1, $2, $3)", (1, "stack-initial", 5)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES ($1, $2, $3)", (1, "stack-duplicate", 10)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES ($1, $2, $3)", (2, "stack-final", 15)) + ) + + results = await asyncpg_session.execute_stack(stack, continue_on_error=True) + + assert len(results) == 3 + assert results[0].rowcount == 1 + assert results[1].error is not None + assert results[2].rowcount == 1 + + verify = await asyncpg_session.execute("SELECT COUNT(*) AS total FROM test_table") + assert verify.data is not None + assert verify.data[0]["total"] == 2 + + +async def test_asyncpg_statement_stack_marks_prepared(asyncpg_session: AsyncpgDriver) -> None: + """Prepared statement metadata should be attached to stack results.""" + + await asyncpg_session.execute_script("TRUNCATE TABLE test_table RESTART IDENTITY") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES ($1, $2, $3)", (1, "stack-prepared", 50)) + .push_execute("SELECT value FROM test_table WHERE id = $1", (1,)) + ) + + results = await asyncpg_session.execute_stack(stack) + + assert results[0].metadata is not None + assert results[0].metadata.get("prepared_statement") is True + assert results[1].metadata is not None + assert results[1].metadata.get("prepared_statement") is True diff --git a/tests/integration/test_adapters/test_bigquery/test_driver.py b/tests/integration/test_adapters/test_bigquery/test_driver.py index b2a37bcfe..8530ec8f1 100644 --- a/tests/integration/test_adapters/test_bigquery/test_driver.py +++ b/tests/integration/test_adapters/test_bigquery/test_driver.py @@ -8,7 +8,7 @@ from pytest_databases.docker.bigquery import BigQueryService from sqlspec.adapters.bigquery import BigQueryDriver -from sqlspec.core import SQLResult +from 
sqlspec.core import SQLResult, StatementStack ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] @@ -218,6 +218,50 @@ def test_bigquery_complex_queries(bigquery_session: BigQueryDriver, driver_test_ assert subquery_result.data[1]["name"] == "Charlie" +def test_bigquery_statement_stack_sequential(bigquery_session: BigQueryDriver, driver_test_table: str) -> None: + """StatementStack executions should remain sequential on BigQuery.""" + + bigquery_session.execute(f"DELETE FROM {driver_test_table}") + + stack = ( + StatementStack() + .push_execute(f"INSERT INTO {driver_test_table} (id, name, value) VALUES (?, ?, ?)", (1, "stack-one", 10)) + .push_execute(f"INSERT INTO {driver_test_table} (id, name, value) VALUES (?, ?, ?)", (2, "stack-two", 20)) + .push_execute(f"SELECT COUNT(*) AS total FROM {driver_test_table} WHERE name LIKE ?", ("stack-%",)) + ) + + results = bigquery_session.execute_stack(stack) + + assert len(results) == 3 + assert results[2].raw_result is not None + assert results[2].raw_result.data is not None + assert results[2].raw_result.data[0]["total"] == 2 + + +def test_bigquery_statement_stack_continue_on_error(bigquery_session: BigQueryDriver, driver_test_table: str) -> None: + """Continue-on-error should surface BigQuery failures but keep executing.""" + + bigquery_session.execute(f"DELETE FROM {driver_test_table}") + + stack = ( + StatementStack() + .push_execute(f"INSERT INTO {driver_test_table} (id, name, value) VALUES (?, ?, ?)", (1, "stack-initial", 50)) + .push_execute( # invalid column triggers deterministic error + f"INSERT INTO {driver_test_table} (nonexistent_column) VALUES (1)" + ) + .push_execute(f"INSERT INTO {driver_test_table} (id, name, value) VALUES (?, ?, ?)", (2, "stack-final", 75)) + ) + + results = bigquery_session.execute_stack(stack, continue_on_error=True) + + assert len(results) == 3 + assert results[1].error is not None + + verify = bigquery_session.execute(f"SELECT COUNT(*) AS total FROM {driver_test_table}") + assert verify.data is not None + assert verify.data[0]["total"] == 2 + + def test_bigquery_schema_operations(bigquery_session: BigQueryDriver, bigquery_service: BigQueryService) -> None: """Test schema operations (DDL).""" diff --git a/tests/integration/test_adapters/test_duckdb/test_driver.py b/tests/integration/test_adapters/test_duckdb/test_driver.py index 2a76a8fc9..323e454d2 100644 --- a/tests/integration/test_adapters/test_duckdb/test_driver.py +++ b/tests/integration/test_adapters/test_duckdb/test_driver.py @@ -6,7 +6,7 @@ import pytest from sqlspec.adapters.duckdb import DuckDBDriver -from sqlspec.core import SQLResult +from sqlspec.core import SQLResult, StatementStack pytestmark = pytest.mark.xdist_group("duckdb") @@ -603,3 +603,49 @@ def test_duckdb_for_share_locking(duckdb_session: DuckDBDriver) -> None: raise finally: duckdb_session.execute_script("DROP TABLE IF EXISTS test_table") + + +def test_duckdb_statement_stack_sequential(duckdb_session: DuckDBDriver) -> None: + """DuckDB drivers should use sequential stack execution.""" + + duckdb_session.execute("DELETE FROM test_table") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name) VALUES (?, ?)", (1, "duckdb-stack-one")) + .push_execute("INSERT INTO test_table (id, name) VALUES (?, ?)", (2, "duckdb-stack-two")) + .push_execute("SELECT COUNT(*) AS total FROM test_table WHERE name LIKE ?", ("duckdb-stack-%",)) + ) + + results = duckdb_session.execute_stack(stack) + + assert len(results) == 3 + assert results[0].rowcount == 1 + assert 
results[1].rowcount == 1 + assert results[2].raw_result is not None + assert results[2].raw_result.data is not None + assert results[2].raw_result.data[0]["total"] == 2 + + +def test_duckdb_statement_stack_continue_on_error(duckdb_session: DuckDBDriver) -> None: + """DuckDB sequential stack execution should honor continue-on-error.""" + + duckdb_session.execute("DELETE FROM test_table") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name) VALUES (?, ?)", (1, "duckdb-initial")) + .push_execute("INSERT INTO test_table (id, name) VALUES (?, ?)", (1, "duckdb-duplicate")) + .push_execute("INSERT INTO test_table (id, name) VALUES (?, ?)", (2, "duckdb-final")) + ) + + results = duckdb_session.execute_stack(stack, continue_on_error=True) + + assert len(results) == 3 + assert results[0].rowcount == 1 + assert results[1].error is not None + assert results[2].rowcount == 1 + + verify = duckdb_session.execute("SELECT COUNT(*) AS total FROM test_table") + assert verify.data is not None + assert verify.data[0]["total"] == 2 diff --git a/tests/integration/test_adapters/test_oracledb/test_stack.py b/tests/integration/test_adapters/test_oracledb/test_stack.py new file mode 100644 index 000000000..d56853d87 --- /dev/null +++ b/tests/integration/test_adapters/test_oracledb/test_stack.py @@ -0,0 +1,144 @@ +# pyright: reportPrivateUsage=false + +"""Integration tests for Oracle StatementStack execution paths.""" + +from __future__ import annotations + +from typing import Any + +import pytest + +from sqlspec.adapters.oracledb import OracleAsyncDriver, OracleSyncDriver +from sqlspec.core import StackExecutionError, StatementStack + +pytestmark = pytest.mark.xdist_group("oracle") + + +DROP_TEMPLATE = """ +BEGIN + EXECUTE IMMEDIATE 'DROP TABLE {table_name}'; +EXCEPTION + WHEN OTHERS THEN + IF SQLCODE != -942 THEN + RAISE; + END IF; +END; +""" + +CREATE_TEMPLATE = """ +CREATE TABLE {table_name} ( + id NUMBER PRIMARY KEY, + name VARCHAR2(50) +) +""" + + +async def _reset_async_table(driver: OracleAsyncDriver, table_name: str) -> None: + await driver.execute_script(DROP_TEMPLATE.format(table_name=table_name)) + await driver.execute_script(CREATE_TEMPLATE.format(table_name=table_name)) + + +def _reset_sync_table(driver: OracleSyncDriver, table_name: str) -> None: + driver.execute_script(DROP_TEMPLATE.format(table_name=table_name)) + driver.execute_script(CREATE_TEMPLATE.format(table_name=table_name)) + + +@pytest.mark.asyncio(loop_scope="function") +async def test_async_statement_stack_native_pipeline( + monkeypatch: pytest.MonkeyPatch, oracle_async_session: OracleAsyncDriver +) -> None: + """Verify StatementStack execution routes through the native pipeline when supported.""" + + if not await oracle_async_session._pipeline_native_supported(): + pytest.skip("Native pipeline support unavailable for current Oracle version") + + table_name = "stack_async_pipeline" + await _reset_async_table(oracle_async_session, table_name) + + call_counter = {"count": 0} + original_execute_stack_native = OracleAsyncDriver._execute_stack_native + + async def tracking_execute_stack_native( + self: OracleAsyncDriver, stack: StatementStack, *, continue_on_error: bool + ) -> tuple[Any, ...]: + call_counter["count"] += 1 + return await original_execute_stack_native(self, stack, continue_on_error=continue_on_error) + + monkeypatch.setattr(OracleAsyncDriver, "_execute_stack_native", tracking_execute_stack_native) + + stack = ( + StatementStack() + .push_execute(f"INSERT INTO {table_name} (id, name) VALUES (:id, 
:name)", {"id": 1, "name": "alpha"}) + .push_execute(f"INSERT INTO {table_name} (id, name) VALUES (:id, :name)", {"id": 2, "name": "beta"}) + .push_execute(f"SELECT name FROM {table_name} WHERE id = :id", {"id": 2}) + ) + + results = await oracle_async_session.execute_stack(stack) + + assert call_counter["count"] == 1, "Native pipeline was not invoked" + assert len(results) == 3 + assert results[0].rowcount == 1 + assert results[1].rowcount == 1 + assert results[2].raw_result is not None + assert results[2].raw_result.data is not None + assert results[2].raw_result.data[0]["name"] == "beta" + + await oracle_async_session.execute_script(DROP_TEMPLATE.format(table_name=table_name)) + + +@pytest.mark.asyncio(loop_scope="function") +async def test_async_statement_stack_continue_on_error_pipeline(oracle_async_session: OracleAsyncDriver) -> None: + """Ensure continue-on-error surfaces failures while executing remaining operations.""" + + if not await oracle_async_session._pipeline_native_supported(): + pytest.skip("Native pipeline support unavailable for current Oracle version") + + table_name = "stack_async_errors" + await _reset_async_table(oracle_async_session, table_name) + + stack = ( + StatementStack() + .push_execute(f"INSERT INTO {table_name} (id, name) VALUES (:id, :name)", {"id": 1, "name": "alpha"}) + .push_execute( # duplicate PK to trigger ORA-00001 + f"INSERT INTO {table_name} (id, name) VALUES (:id, :name)", {"id": 1, "name": "duplicate"} + ) + .push_execute(f"INSERT INTO {table_name} (id, name) VALUES (:id, :name)", {"id": 2, "name": "beta"}) + ) + + results = await oracle_async_session.execute_stack(stack, continue_on_error=True) + + assert len(results) == 3 + assert results[0].rowcount == 1 + assert isinstance(results[1].error, StackExecutionError) + assert results[2].rowcount == 1 + + verify_result = await oracle_async_session.execute( + f"SELECT COUNT(*) as total_rows FROM {table_name} WHERE id = :id", {"id": 2} + ) + assert verify_result.data is not None + assert verify_result.data[0]["total_rows"] == 1 + + await oracle_async_session.execute_script(DROP_TEMPLATE.format(table_name=table_name)) + + +def test_sync_statement_stack_sequential_fallback(oracle_sync_session: OracleSyncDriver) -> None: + """Sync driver should execute stacks sequentially when pipelines are unavailable.""" + + table_name = "stack_sync_pipeline" + _reset_sync_table(oracle_sync_session, table_name) + + stack = ( + StatementStack() + .push_execute(f"INSERT INTO {table_name} (id, name) VALUES (:id, :name)", {"id": 1, "name": "sync-alpha"}) + .push_execute(f"SELECT name FROM {table_name} WHERE id = :id", {"id": 1}) + ) + + results = oracle_sync_session.execute_stack(stack) + + assert len(results) == 2 + assert results[0].rowcount == 1 + assert results[1].raw_result is not None + assert results[1].raw_result.data is not None + assert results[1].raw_result.data[0]["name"] == "sync-alpha" + + oracle_sync_session.execute_script(DROP_TEMPLATE.format(table_name=table_name)) diff --git a/tests/integration/test_adapters/test_psqlpy/test_driver.py b/tests/integration/test_adapters/test_psqlpy/test_driver.py index e323bd900..f21672f02 100644 --- a/tests/integration/test_adapters/test_psqlpy/test_driver.py +++ b/tests/integration/test_adapters/test_psqlpy/test_driver.py @@ -7,7 +7,7 @@ import pytest from sqlspec.adapters.psqlpy import PsqlpyDriver -from sqlspec.core import SQL, SQLResult +from sqlspec.core import SQL, SQLResult, StatementStack if TYPE_CHECKING: pass @@ -199,6 +199,51 @@ async def 
test_multiple_positional_parameters(psqlpy_session: PsqlpyDriver) -> N assert len(mixed_result.data) == 1 +async def test_psqlpy_statement_stack_sequential(psqlpy_session: PsqlpyDriver) -> None: + """psqlpy uses sequential stack execution.""" + + await psqlpy_session.execute("DELETE FROM test_table") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name) VALUES (?, ?)", (1, "psqlpy-stack-one")) + .push_execute("INSERT INTO test_table (id, name) VALUES (?, ?)", (2, "psqlpy-stack-two")) + .push_execute("SELECT COUNT(*) AS total FROM test_table WHERE name LIKE ?", ("psqlpy-stack-%",)) + ) + + results = await psqlpy_session.execute_stack(stack) + + assert len(results) == 3 + + verify = await psqlpy_session.execute( + "SELECT COUNT(*) AS total FROM test_table WHERE name LIKE ?", ("psqlpy-stack-%",) + ) + assert verify.data is not None + assert verify.data[0]["total"] == 2 + + +async def test_psqlpy_statement_stack_continue_on_error(psqlpy_session: PsqlpyDriver) -> None: + """Sequential stack execution should honor continue-on-error flag.""" + + await psqlpy_session.execute("DELETE FROM test_table") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name) VALUES (?, ?)", (1, "psqlpy-initial")) + .push_execute("INSERT INTO test_table (id, name) VALUES (?, ?)", (1, "psqlpy-duplicate")) + .push_execute("INSERT INTO test_table (id, name) VALUES (?, ?)", (2, "psqlpy-final")) + ) + + results = await psqlpy_session.execute_stack(stack, continue_on_error=True) + + assert len(results) == 3 + assert results[1].error is not None + + verify = await psqlpy_session.execute("SELECT COUNT(*) AS total FROM test_table") + assert verify.data is not None + assert verify.data[0]["total"] == 2 + + async def test_scalar_parameter_handling(psqlpy_session: PsqlpyDriver) -> None: """Test handling of scalar parameters in various contexts.""" diff --git a/tests/integration/test_adapters/test_psycopg/test_async_copy.py b/tests/integration/test_adapters/test_psycopg/test_async_copy.py index bc0888b9f..3da2fc116 100644 --- a/tests/integration/test_adapters/test_psycopg/test_async_copy.py +++ b/tests/integration/test_adapters/test_psycopg/test_async_copy.py @@ -8,7 +8,7 @@ from pytest_databases.docker.postgres import PostgresService from sqlspec.adapters.psycopg import PsycopgAsyncConfig, PsycopgAsyncDriver -from sqlspec.core import SQLResult +from sqlspec.core import SQLResult, StatementStack pytestmark = pytest.mark.xdist_group("postgres") @@ -149,3 +149,55 @@ async def test_psycopg_async_copy_csv_format_keyword(psycopg_async_session: Psyc assert select_result.data[2]["value"] == 800 await psycopg_async_session.execute_script("DROP TABLE copy_csv_async_kw") + + +async def test_psycopg_async_statement_stack_pipeline(psycopg_async_session: PsycopgAsyncDriver) -> None: + """Validate that StatementStack leverages async pipeline mode.""" + + await psycopg_async_session.execute_script("TRUNCATE TABLE test_table_async RESTART IDENTITY") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table_async (id, name, value) VALUES (%s, %s, %s)", (1, "async-stack-one", 50)) + .push_execute("INSERT INTO test_table_async (id, name, value) VALUES (%s, %s, %s)", (2, "async-stack-two", 60)) + .push_execute("SELECT COUNT(*) AS total FROM test_table_async WHERE name LIKE %s", ("async-stack-%",)) + ) + + results = await psycopg_async_session.execute_stack(stack) + + assert len(results) == 3 + verify = await psycopg_async_session.execute( + "SELECT COUNT(*) AS total FROM 
test_table_async WHERE name LIKE %s", ("async-stack-%",) + ) + assert verify.data is not None + assert verify.data[0]["total"] == 2 + + +async def test_psycopg_async_statement_stack_continue_on_error(psycopg_async_session: PsycopgAsyncDriver) -> None: + """Ensure async pipeline honors continue-on-error semantics.""" + + await psycopg_async_session.execute_script("TRUNCATE TABLE test_table_async RESTART IDENTITY") + + stack = ( + StatementStack() + .push_execute( + "INSERT INTO test_table_async (id, name, value) VALUES (%s, %s, %s)", (1, "async-stack-initial", 15) + ) + .push_execute( + "INSERT INTO test_table_async (id, name, value) VALUES (%s, %s, %s)", (1, "async-stack-duplicate", 25) + ) + .push_execute( + "INSERT INTO test_table_async (id, name, value) VALUES (%s, %s, %s)", (2, "async-stack-final", 35) + ) + ) + + results = await psycopg_async_session.execute_stack(stack, continue_on_error=True) + + assert len(results) == 3 + assert results[0].rowcount == 1 + assert results[1].error is not None + assert results[2].rowcount == 1 + + verify = await psycopg_async_session.execute("SELECT COUNT(*) AS total FROM test_table_async") + assert verify.data is not None + assert verify.data[0]["total"] == 2 diff --git a/tests/integration/test_adapters/test_psycopg/test_driver.py b/tests/integration/test_adapters/test_psycopg/test_driver.py index 50c42e9a6..1ec2cbe99 100644 --- a/tests/integration/test_adapters/test_psycopg/test_driver.py +++ b/tests/integration/test_adapters/test_psycopg/test_driver.py @@ -6,7 +6,7 @@ import pytest from sqlspec.adapters.psycopg import PsycopgSyncConfig, PsycopgSyncDriver -from sqlspec.core import SQLResult +from sqlspec.core import SQLResult, StatementStack ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] @@ -18,6 +18,7 @@ def psycopg_session(psycopg_sync_config: PsycopgSyncConfig) -> Generator[Psycopg """Create a psycopg session with test table.""" with psycopg_sync_config.provide_session() as session: + session.execute_script("DROP TABLE IF EXISTS test_table") session.execute_script( """ CREATE TABLE IF NOT EXISTS test_table ( @@ -198,6 +199,55 @@ def test_psycopg_error_handling(psycopg_session: PsycopgSyncDriver) -> None: psycopg_session.execute("SELECT nonexistent_column FROM test_table") +def test_psycopg_statement_stack_pipeline(psycopg_session: PsycopgSyncDriver) -> None: + """StatementStack should leverage psycopg pipeline mode when available.""" + + psycopg_session.execute("TRUNCATE TABLE test_table RESTART IDENTITY") + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES (%s, %s, %s)", (1, "sync-stack-one", 5)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES (%s, %s, %s)", (2, "sync-stack-two", 15)) + .push_execute("SELECT COUNT(*) AS total FROM test_table WHERE name LIKE %s", ("sync-stack-%",)) + ) + + results = psycopg_session.execute_stack(stack) + + assert len(results) == 3 + total_result = psycopg_session.execute( + "SELECT COUNT(*) AS total FROM test_table WHERE name LIKE %s", "sync-stack-%" + ) + assert total_result.data is not None + assert total_result.data[0]["total"] == 2 + + +def test_psycopg_statement_stack_continue_on_error(psycopg_session: PsycopgSyncDriver) -> None: + """Pipeline execution should continue when instructed to handle errors.""" + + psycopg_session.execute("TRUNCATE TABLE test_table RESTART IDENTITY") + psycopg_session.commit() + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES (%s, %s, %s)", (1, 
"sync-initial", 10)) + .push_execute( # duplicate PK triggers error + "INSERT INTO test_table (id, name, value) VALUES (%s, %s, %s)", (1, "sync-duplicate", 20) + ) + .push_execute("INSERT INTO test_table (id, name, value) VALUES (%s, %s, %s)", (2, "sync-success-final", 30)) + ) + + results = psycopg_session.execute_stack(stack, continue_on_error=True) + + assert len(results) == 3 + assert results[1].error is not None + assert results[0].error is None + assert results[2].error is None + + verify = psycopg_session.execute("SELECT COUNT(*) AS total FROM test_table") + assert verify.data is not None + assert verify.data[0]["total"] == 2 + + def test_psycopg_data_types(psycopg_session: PsycopgSyncDriver) -> None: """Test PostgreSQL data type handling with psycopg.""" @@ -255,6 +305,9 @@ def test_psycopg_data_types(psycopg_session: PsycopgSyncDriver) -> None: def test_psycopg_transactions(psycopg_session: PsycopgSyncDriver) -> None: """Test transaction behavior.""" + psycopg_session.execute("TRUNCATE TABLE test_table RESTART IDENTITY") + psycopg_session.commit() + psycopg_session.execute("INSERT INTO test_table (name, value) VALUES (%s, %s)", "transaction_test", 100) result = psycopg_session.execute("SELECT COUNT(*) as count FROM test_table WHERE name = %s", ("transaction_test")) @@ -266,6 +319,9 @@ def test_psycopg_transactions(psycopg_session: PsycopgSyncDriver) -> None: def test_psycopg_complex_queries(psycopg_session: PsycopgSyncDriver) -> None: """Test complex SQL queries.""" + psycopg_session.execute("TRUNCATE TABLE test_table RESTART IDENTITY") + psycopg_session.commit() + test_data = [("Alice", 25), ("Bob", 30), ("Charlie", 35), ("Diana", 28)] psycopg_session.execute_many("INSERT INTO test_table (name, value) VALUES (%s, %s)", test_data) diff --git a/tests/integration/test_adapters/test_sqlite/test_driver.py b/tests/integration/test_adapters/test_sqlite/test_driver.py index 79a73b59d..f521b6146 100644 --- a/tests/integration/test_adapters/test_sqlite/test_driver.py +++ b/tests/integration/test_adapters/test_sqlite/test_driver.py @@ -6,7 +6,7 @@ import pytest from sqlspec.adapters.sqlite import SqliteDriver -from sqlspec.core import SQLResult +from sqlspec.core import SQLResult, StatementStack pytestmark = pytest.mark.xdist_group("sqlite") ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] @@ -205,6 +205,54 @@ def test_sqlite_data_types(sqlite_session: SqliteDriver) -> None: assert row["null_col"] is None +def test_sqlite_statement_stack_sequential(sqlite_session: SqliteDriver) -> None: + """StatementStack should execute sequentially for SQLite.""" + + sqlite_session.execute("DELETE FROM test_table") + sqlite_session.commit() + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (1, "sqlite-stack-one", 100)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (2, "sqlite-stack-two", 200)) + .push_execute("SELECT COUNT(*) AS total FROM test_table WHERE name LIKE ?", ("sqlite-stack-%",)) + ) + + results = sqlite_session.execute_stack(stack) + + assert len(results) == 3 + assert results[0].rowcount == 1 + assert results[1].rowcount == 1 + assert results[2].raw_result is not None + assert results[2].raw_result.data is not None + assert results[2].raw_result.data[0]["total"] == 2 + + +def test_sqlite_statement_stack_continue_on_error(sqlite_session: SqliteDriver) -> None: + """Sequential fallback should honor continue-on-error mode.""" + + sqlite_session.execute("DELETE FROM test_table") + 
sqlite_session.commit() + + stack = ( + StatementStack() + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (1, "sqlite-initial", 5)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (1, "sqlite-duplicate", 15)) + .push_execute("INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)", (2, "sqlite-final", 25)) + ) + + results = sqlite_session.execute_stack(stack, continue_on_error=True) + + assert len(results) == 3 + assert results[0].rowcount == 1 + assert results[1].error is not None + assert results[2].rowcount == 1 + + verify = sqlite_session.execute("SELECT COUNT(*) AS total FROM test_table") + assert verify.data is not None + assert verify.data[0]["total"] == 2 + + def test_sqlite_transactions(sqlite_session: SqliteDriver) -> None: """Test transaction behavior.""" diff --git a/tests/integration/test_stack_edge_cases.py b/tests/integration/test_stack_edge_cases.py new file mode 100644 index 000000000..bde083ae7 --- /dev/null +++ b/tests/integration/test_stack_edge_cases.py @@ -0,0 +1,183 @@ +"""Cross-adapter StatementStack edge cases exercised against SQLite.""" + +from collections.abc import Generator + +import pytest + +from sqlspec.adapters.sqlite import SqliteConfig, SqliteDriver +from sqlspec.core import StatementStack +from sqlspec.exceptions import StackExecutionError + +pytestmark = pytest.mark.xdist_group("sqlite") + + +@pytest.fixture() +def sqlite_stack_session() -> "Generator[SqliteDriver, None, None]": + config = SqliteConfig(pool_config={"database": ":memory:"}) + with config.provide_session() as session: + session.execute_script( + """ + CREATE TABLE IF NOT EXISTS stack_edge_table ( + id INTEGER PRIMARY KEY, + name TEXT NOT NULL, + notes TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP + ); + DELETE FROM stack_edge_table; + """ + ) + session.commit() + yield session + config.close_pool() + + +def _table_count(session: "SqliteDriver") -> int: + result = session.execute("SELECT COUNT(*) AS total FROM stack_edge_table") + assert result.data is not None + return int(result.data[0]["total"]) + + +def test_execute_stack_requires_operations(sqlite_stack_session: "SqliteDriver") -> None: + with pytest.raises(ValueError, match="Cannot execute an empty StatementStack"): + sqlite_stack_session.execute_stack(StatementStack()) + + +def test_single_operation_stack_matches_execute(sqlite_stack_session: "SqliteDriver") -> None: + stack = StatementStack().push_execute( + "INSERT INTO stack_edge_table (id, name, notes) VALUES (?, ?, ?)", (1, "solo", None) + ) + + results = sqlite_stack_session.execute_stack(stack) + + assert len(results) == 1 + assert results[0].rowcount == 1 + assert _table_count(sqlite_stack_session) == 1 + + +def test_stack_with_only_select_operations(sqlite_stack_session: "SqliteDriver") -> None: + sqlite_stack_session.execute( + "INSERT INTO stack_edge_table (id, name, notes) VALUES (?, ?, ?)", (1, "alpha", "note") + ) + sqlite_stack_session.execute("INSERT INTO stack_edge_table (id, name, notes) VALUES (?, ?, ?)", (2, "beta", "note")) + + stack = ( + StatementStack() + .push_execute("SELECT name FROM stack_edge_table WHERE id = ?", (1,)) + .push_execute("SELECT COUNT(*) AS total FROM stack_edge_table", ()) + ) + + results = sqlite_stack_session.execute_stack(stack) + + first_result = results[0].raw_result + second_result = results[1].raw_result + assert first_result is not None + assert second_result is not None + assert first_result.data is not None + assert second_result.data is not None + assert 
first_result.data[0]["name"] == "alpha" + assert second_result.data[0]["total"] == 2 + + +def test_large_stack_of_mixed_operations(sqlite_stack_session: "SqliteDriver") -> None: + stack = StatementStack() + for idx in range(1, 51): + stack = stack.push_execute( + "INSERT INTO stack_edge_table (id, name, notes) VALUES (?, ?, ?)", (idx, f"user-{idx}", None) + ) + stack = stack.push_execute("SELECT COUNT(*) AS total FROM stack_edge_table", ()) + + results = sqlite_stack_session.execute_stack(stack) + + assert len(results) == 51 + final_result = results[-1].raw_result + assert final_result is not None + assert final_result.data is not None + assert final_result.data[0]["total"] == 50 + + +def test_fail_fast_rolls_back_new_transaction(sqlite_stack_session: "SqliteDriver") -> None: + stack = ( + StatementStack() + .push_execute("INSERT INTO stack_edge_table (id, name, notes) VALUES (?, ?, ?)", (1, "first", None)) + .push_execute("INSERT INTO missing_table VALUES (1)") + ) + + with pytest.raises(StackExecutionError): + sqlite_stack_session.execute_stack(stack) + + assert _table_count(sqlite_stack_session) == 0 + + +def test_continue_on_error_commits_successes(sqlite_stack_session: "SqliteDriver") -> None: + stack = ( + StatementStack() + .push_execute("INSERT INTO stack_edge_table (id, name, notes) VALUES (?, ?, ?)", (1, "ok", None)) + .push_execute("INSERT INTO stack_edge_table (id, name, notes) VALUES (?, ?, ?)", (1, "duplicate", None)) + .push_execute("INSERT INTO stack_edge_table (id, name, notes) VALUES (?, ?, ?)", (2, "ok", None)) + ) + + results = sqlite_stack_session.execute_stack(stack, continue_on_error=True) + + assert len(results) == 3 + assert results[1].error is not None + assert _table_count(sqlite_stack_session) == 2 + + +def test_parameter_edge_cases(sqlite_stack_session: "SqliteDriver") -> None: + stack = ( + StatementStack() + .push_execute("INSERT INTO stack_edge_table (id, name, notes) VALUES (?, ?, ?)", (1, "nullable", None)) + .push_execute( + "INSERT INTO stack_edge_table (id, name, notes) VALUES (:id, :name, :notes)", + {"id": 2, "name": "dict", "notes": ""}, + ) + .push_execute("SELECT notes FROM stack_edge_table WHERE id = ?", (1,)) + ) + + results = sqlite_stack_session.execute_stack(stack) + third_result = results[2].raw_result + assert third_result is not None + assert third_result.data is not None + assert third_result.data[0]["notes"] is None + + +def test_stack_with_existing_transaction(sqlite_stack_session: "SqliteDriver") -> None: + sqlite_stack_session.begin() + stack = ( + StatementStack() + .push_execute("INSERT INTO stack_edge_table (id, name) VALUES (?, ?)", (1, "tx")) + .push_execute("INSERT INTO stack_edge_table (id, name) VALUES (?, ?)", (2, "tx")) + ) + + sqlite_stack_session.execute_stack(stack) + assert sqlite_stack_session.connection.in_transaction is True + + sqlite_stack_session.rollback() + assert _table_count(sqlite_stack_session) == 0 + + +def test_stack_creates_transaction_when_needed(sqlite_stack_session: "SqliteDriver") -> None: + stack = ( + StatementStack() + .push_execute("INSERT INTO stack_edge_table (id, name) VALUES (?, ?)", (1, "auto")) + .push_execute("INSERT INTO stack_edge_table (id, name) VALUES (?, ?)", (2, "auto")) + ) + + sqlite_stack_session.execute_stack(stack) + assert sqlite_stack_session.connection.in_transaction is False + assert _table_count(sqlite_stack_session) == 2 + + +def test_stack_single_statement_selects_inside_existing_transaction(sqlite_stack_session: "SqliteDriver") -> None: + sqlite_stack_session.begin() + 
sqlite_stack_session.execute("INSERT INTO stack_edge_table (id, name) VALUES (?, ?)", (1, "pre")) + + stack = StatementStack().push_execute("SELECT name FROM stack_edge_table WHERE id = ?", (1,)) + + results = sqlite_stack_session.execute_stack(stack) + select_result = results[0].raw_result + assert select_result is not None + assert select_result.data is not None + assert select_result.data[0]["name"] == "pre" + + sqlite_stack_session.rollback() diff --git a/tests/unit/test_adapters/test_oracledb/test_pipeline_helpers.py b/tests/unit/test_adapters/test_oracledb/test_pipeline_helpers.py new file mode 100644 index 000000000..caf638dce --- /dev/null +++ b/tests/unit/test_adapters/test_oracledb/test_pipeline_helpers.py @@ -0,0 +1,121 @@ +# pyright: reportPrivateUsage=false + +from typing import Any, cast + +import pytest + +pytest.importorskip("oracledb") + +from sqlspec.adapters.oracledb._types import OracleAsyncConnection +from sqlspec.adapters.oracledb.driver import OracleAsyncDriver, oracledb_statement_config +from sqlspec.core import StatementStack +from sqlspec.driver._common import StackExecutionObserver + + +class _StubAsyncConnection: + """Minimal async connection stub for OracleAsyncDriver tests.""" + + def __init__(self) -> None: + self.in_transaction = False + + +class _StubPipelineResult: + """Pipeline result stub for driver helper tests.""" + + def __init__( + self, + *, + rows: list[tuple[Any, ...]] | None = None, + columns: list[Any] | None = None, + warning: Any | None = None, + error: Exception | None = None, + rowcount: int | None = None, + ) -> None: + self.rows = rows + self.columns = columns + self.warning = warning + self.error = error + self.rowcount = rowcount + self.return_value = None + + +class _StubObserver: + """Observer stub capturing recorded errors.""" + + def __init__(self) -> None: + self.errors: list[Exception] = [] + + def record_operation_error(self, error: Exception) -> None: + self.errors.append(error) + + +class _StubColumn: + """Simple column metadata stub.""" + + def __init__(self, name: str) -> None: + self.name = name + + +def _make_driver() -> OracleAsyncDriver: + connection = cast("OracleAsyncConnection", _StubAsyncConnection()) + return OracleAsyncDriver(connection=connection, statement_config=oracledb_statement_config, driver_features={}) + + +def test_stack_native_blocker_detects_arrow() -> None: + driver = _make_driver() + stack = StatementStack().push_execute_arrow("SELECT * FROM dual") + assert driver._stack_native_blocker(stack) == "arrow_operation" + + +def test_stack_native_blocker_detects_script() -> None: + driver = _make_driver() + stack = StatementStack().push_execute_script("BEGIN NULL; END;") + assert driver._stack_native_blocker(stack) == "script_operation" + + +def test_stack_native_blocker_allows_standard_operations() -> None: + driver = _make_driver() + stack = StatementStack().push_execute("SELECT 1 FROM dual") + assert driver._stack_native_blocker(stack) is None + + +def test_pipeline_result_to_stack_result_uses_rowcount_attr() -> None: + driver = _make_driver() + stack = StatementStack().push_execute("SELECT 1 FROM dual") + compiled = driver._prepare_pipeline_operation(stack.operations[0]) + pipeline_result = _StubPipelineResult(rows=[(1,)], columns=[_StubColumn("VALUE")], warning="warn", rowcount=7) + + stack_result = driver._pipeline_result_to_stack_result(compiled, pipeline_result) + + assert stack_result.rowcount == 7 + assert stack_result.warning == "warn" + raw_result = stack_result.raw_result + assert raw_result is 
not None + assert raw_result.metadata is not None + assert raw_result.metadata["pipeline_operation"] == "execute" + + +def test_pipeline_result_execute_many_rowcount_fallback() -> None: + driver = _make_driver() + stack = StatementStack().push_execute_many("INSERT INTO demo VALUES (:1)", [(1,), (2,)]) + compiled = driver._prepare_pipeline_operation(stack.operations[0]) + pipeline_result = _StubPipelineResult() + + stack_result = driver._pipeline_result_to_stack_result(compiled, pipeline_result) + + assert stack_result.rowcount == 2 + + +def test_build_stack_results_records_errors() -> None: + driver = _make_driver() + stack = StatementStack().push_execute("SELECT 1 FROM dual") + compiled = driver._prepare_pipeline_operation(stack.operations[0]) + observer_stub = _StubObserver() + observer = cast(StackExecutionObserver, observer_stub) + + results = driver._build_stack_results_from_pipeline( + (compiled,), (_StubPipelineResult(error=RuntimeError("boom")),), True, observer + ) + + assert results[0].error is not None + assert len(observer_stub.errors) == 1 diff --git a/tests/unit/test_config_resolver.py b/tests/unit/test_config_resolver.py index 27225cd89..1bb57d67b 100644 --- a/tests/unit/test_config_resolver.py +++ b/tests/unit/test_config_resolver.py @@ -172,7 +172,7 @@ async def test_config_class_rejected(self) -> None: When using resolve_config_*, classes are callable and get instantiated, so they don't reach direct validation as classes. """ - from sqlspec.utils.config_resolver import _is_valid_config + from sqlspec.utils.config_resolver import _is_valid_config # pyright: ignore[reportPrivateUsage] class MockConfigClass: """Mock config class to simulate config classes being passed.""" diff --git a/tests/unit/test_core/test_stack.py b/tests/unit/test_core/test_stack.py index abaf9cc23..d3943c439 100644 --- a/tests/unit/test_core/test_stack.py +++ b/tests/unit/test_core/test_stack.py @@ -4,7 +4,7 @@ import pytest -from sqlspec.core import StackOperation, StatementStack +from sqlspec.core import StackOperation, StatementConfig, StatementStack pytestmark = pytest.mark.xdist_group("core") @@ -38,16 +38,14 @@ def test_push_execute_script_requires_non_empty_sql() -> None: def test_push_execute_many_stores_filters_and_kwargs() -> None: stack = StatementStack().push_execute_many( - "INSERT", - [{"x": 1}], - {"filter": True}, - statement_config=None, - chunk_size=50, + "INSERT", [{"x": 1}], {"filter": True}, statement_config=None, chunk_size=50 ) operation = stack.operations[0] assert operation.method == "execute_many" - assert operation.arguments[0] == ({"x": 1},) - assert operation.arguments[1] == {"filter": True} + arguments = operation.arguments + assert len(arguments) >= 2 + assert arguments[0] == ({"x": 1},) + assert arguments[1] == {"filter": True} assert operation.keyword_arguments is not None assert operation.keyword_arguments["chunk_size"] == 50 @@ -65,14 +63,11 @@ def test_extend_and_from_operations() -> None: def test_reject_nested_stack() -> None: stack = StatementStack() with pytest.raises(TypeError, match="Nested StatementStack"): - stack.push_execute(stack) + stack.push_execute(stack) # type: ignore[arg-type] def test_freeze_kwargs_includes_statement_config() -> None: - class DummyConfig: - pass - - config = DummyConfig() + config = StatementConfig() stack = StatementStack().push_execute("SELECT 1", statement_config=config) operation = stack.operations[0] assert operation.keyword_arguments is not None @@ -91,7 +86,9 @@ def test_push_execute_arrow_records_kwargs() -> None: ) 
operation = stack.operations[0] assert operation.method == "execute_arrow" - assert operation.arguments[0] == {"limit": 10} + arguments = operation.arguments + assert arguments + assert arguments[0] == {"limit": 10} assert operation.keyword_arguments is not None assert operation.keyword_arguments["return_format"] == "batch" assert operation.keyword_arguments["native_only"] is True diff --git a/tests/unit/test_core/test_stack_metrics.py b/tests/unit/test_core/test_stack_metrics.py new file mode 100644 index 000000000..b5b3a8f8b --- /dev/null +++ b/tests/unit/test_core/test_stack_metrics.py @@ -0,0 +1,42 @@ +from sqlspec.core.metrics import StackExecutionMetrics +from sqlspec.observability import ObservabilityRuntime + + +def test_stack_execution_metrics_emit() -> None: + runtime = ObservabilityRuntime(config_name="TestDriver") + metrics = StackExecutionMetrics( + adapter="OracleAsyncDriver", + statement_count=3, + continue_on_error=False, + native_pipeline=False, + forced_disable=False, + ) + metrics.record_duration(0.25) + metrics.emit(runtime) + + snapshot = runtime.metrics_snapshot() + assert snapshot["TestDriver.stack.execute.invocations"] == 1.0 + assert snapshot["TestDriver.stack.execute.statements"] == 3.0 + assert snapshot["TestDriver.stack.execute.mode.failfast"] == 1.0 + assert snapshot["TestDriver.stack.execute.path.sequential"] == 1.0 + assert snapshot["TestDriver.stack.execute.duration_ms"] == 250.0 + + +def test_stack_execution_metrics_partial_errors() -> None: + runtime = ObservabilityRuntime(config_name="TestDriver") + metrics = StackExecutionMetrics( + adapter="OracleAsyncDriver", + statement_count=2, + continue_on_error=True, + native_pipeline=True, + forced_disable=True, + ) + metrics.record_operation_error(RuntimeError("boom")) + metrics.record_duration(0.1) + metrics.emit(runtime) + + snapshot = runtime.metrics_snapshot() + assert snapshot["TestDriver.stack.execute.mode.continue"] == 1.0 + assert snapshot["TestDriver.stack.execute.path.native"] == 1.0 + assert snapshot["TestDriver.stack.execute.override.forced"] == 1.0 + assert snapshot["TestDriver.stack.execute.partial_errors"] == 1.0 diff --git a/tools/run_pre_commit.py b/tools/run_pre_commit.py new file mode 100755 index 000000000..1bb99e5b2 --- /dev/null +++ b/tools/run_pre_commit.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 +"""Run pre-commit hooks without requiring PTY support.""" + +import os +from typing import TYPE_CHECKING + +from pre_commit import util as pre_commit_util +from pre_commit.main import main as pre_commit_main + +if TYPE_CHECKING: + from types import TracebackType + + +class _PipePty: + """Lightweight replacement for pre-commit's PTY helper.""" + + __slots__ = ("r", "w") + + def __init__(self) -> None: + self.r: int | None = None + self.w: int | None = None + + def __enter__(self) -> "_PipePty": + self.r, self.w = os.pipe() + return self + + def close_w(self) -> None: + if self.w is not None: + os.close(self.w) + self.w = None + + def close_r(self) -> None: + if self.r is not None: + os.close(self.r) + self.r = None + + def __exit__( + self, + exc_type: "type[BaseException] | None", + exc_value: "BaseException | None", + traceback: "TracebackType | None", + ) -> None: + self.close_w() + self.close_r() + + +pre_commit_util.Pty = _PipePty # type: ignore[assignment] + + +def main() -> int: + """Invoke pre-commit with patched PTY handling.""" + + return pre_commit_main() + + +if __name__ == "__main__": + raise SystemExit(main()) From fc65ef10ebc79787ab55bc77b30a2d992adf32bd Mon Sep 17 00:00:00 2001 
From: Cody Fincher
Date: Thu, 13 Nov 2025 00:12:59 +0000
Subject: [PATCH 04/10] docs: reorganize examples into arrow/ and patterns/stacks/ and refresh query stack references

---
 docs/changelog.rst                            |   2 +-
 docs/examples/README.md                       |   1 +
 docs/examples/arrow/__init__.py               |   3 +
 .../examples/{ => arrow}/arrow_basic_usage.py |   0
 docs/examples/arrow/arrow_basic_usage.rst     |  16 ++
 docs/examples/index.rst                       |  16 +-
 docs/examples/patterns/stacks/__init__.py     |   3 +
 .../patterns/stacks/query_stack_example.py    | 106 ++++++++++
 .../stacks}/query_stack_example.rst           |   4 +-
 docs/examples/query_stack_example.py          |  98 ---------
 specs/guides/query-stack.md                   |   4 +-
 uv.lock                                       | 196 +++++++++---------
 12 files changed, 249 insertions(+), 200 deletions(-)
 create mode 100644 docs/examples/arrow/__init__.py
 rename docs/examples/{ => arrow}/arrow_basic_usage.py (100%)
 create mode 100644 docs/examples/arrow/arrow_basic_usage.rst
 create mode 100644 docs/examples/patterns/stacks/__init__.py
 create mode 100644 docs/examples/patterns/stacks/query_stack_example.py
 rename docs/examples/{ => patterns/stacks}/query_stack_example.rst (79%)
 delete mode 100644 docs/examples/query_stack_example.py

diff --git a/docs/changelog.rst b/docs/changelog.rst
index 308a8b124..dda760685 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -14,7 +14,7 @@ Query Stack Documentation Suite
 --------------------------------

 - Expanded the :doc:`/reference/query-stack` API reference (``StatementStack``, ``StackResult``, driver hooks, and ``StackExecutionError``) with the high-level workflow, execution modes, telemetry, and troubleshooting tips.
-- Added :doc:`/examples/query_stack_example` that runs the same stack against SQLite and AioSQLite.
+- Added :doc:`/examples/patterns/stacks/query_stack_example` that runs the same stack against SQLite and AioSQLite.
 - Captured the detailed architecture and performance guidance inside the internal specs workspace for future agent runs.
 - Updated every adapter reference with a **Query Stack Support** section so behavior is documented per database.

diff --git a/docs/examples/README.md b/docs/examples/README.md
index 87b32caa7..3283f5f31 100644
--- a/docs/examples/README.md
+++ b/docs/examples/README.md
@@ -6,6 +6,7 @@ This directory now mirrors the way developers explore SQLSpec:
 - `frameworks/` groups runnable apps (Litestar for now) that rely on lightweight backends (aiosqlite, duckdb).
 - `adapters/` holds connection-focused snippets for production drivers such as asyncpg, psycopg, and oracledb.
 - `patterns/` demonstrates SQL builder usage, migrations, and multi-tenant routing.
+- `arrow/` collects Arrow integration demos so advanced exports stay discoverable without bloating other folders.
 - `loaders/` shows how to hydrate SQL from files for quick demos.
 - `extensions/` keeps integration-specific samples (Adapter Development Kit in this pass).

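Not part of this patch, but useful context for the changelog entry above: a minimal sketch of the two execution modes (fail-fast versus `continue_on_error`) that the query stack reference documents, assuming the in-memory SQLite adapter the integration tests use. The table name `t` is illustrative only.

    from sqlspec import SQLSpec
    from sqlspec.adapters.sqlite import SqliteConfig
    from sqlspec.core import StatementStack
    from sqlspec.exceptions import StackExecutionError

    registry = SQLSpec()
    config = registry.add_config(SqliteConfig(pool_config={"database": ":memory:"}))

    with registry.provide_session(config) as session:
        session.execute_script("CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)")
        stack = (
            StatementStack()
            .push_execute("INSERT INTO t (id, name) VALUES (?, ?)", (1, "ok"))
            .push_execute("INSERT INTO t (id, name) VALUES (?, ?)", (1, "dup"))  # primary-key clash
        )
        try:
            # Fail-fast mode raises StackExecutionError on the first failure
            # and rolls back the transaction it opened.
            session.execute_stack(stack)
        except StackExecutionError:
            pass
        # continue_on_error records the failure on the per-operation StackResult instead.
        results = session.execute_stack(stack, continue_on_error=True)
        for result in results:
            print(result.rowcount, result.error)
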
diff --git a/docs/examples/arrow/__init__.py b/docs/examples/arrow/__init__.py new file mode 100644 index 000000000..6aac6a1fc --- /dev/null +++ b/docs/examples/arrow/__init__.py @@ -0,0 +1,3 @@ +"""Arrow integration examples for SQLSpec.""" + +__all__ = () diff --git a/docs/examples/arrow_basic_usage.py b/docs/examples/arrow/arrow_basic_usage.py similarity index 100% rename from docs/examples/arrow_basic_usage.py rename to docs/examples/arrow/arrow_basic_usage.py diff --git a/docs/examples/arrow/arrow_basic_usage.rst b/docs/examples/arrow/arrow_basic_usage.rst new file mode 100644 index 000000000..a81690d4a --- /dev/null +++ b/docs/examples/arrow/arrow_basic_usage.rst @@ -0,0 +1,16 @@ +Arrow: Basic Usage +================== + +Demonstrate the ``select_to_arrow()`` helper across multiple adapters and +conversion targets (native Arrow, pandas, polars, and Parquet exports). + +.. code-block:: console + + uv run python docs/examples/arrow/arrow_basic_usage.py + +Source +------ + +.. literalinclude:: arrow_basic_usage.py + :language: python + :linenos: diff --git a/docs/examples/index.rst b/docs/examples/index.rst index 01aa9a3c4..295e0facd 100644 --- a/docs/examples/index.rst +++ b/docs/examples/index.rst @@ -91,9 +91,20 @@ Patterns - Routing requests to dedicated SQLite configs per tenant slug. * - ``patterns/configs/multi_adapter_registry.py`` - Register multiple adapters on a single SQLSpec registry. - * - ``query_stack_example.py`` + * - ``patterns/stacks/query_stack_example.py`` - Immutable StatementStack workflow executed against SQLite and AioSQLite drivers. +Arrow +----- + +.. list-table:: Arrow-powered exports + :header-rows: 1 + + * - File + - Scenario + * - ``arrow/arrow_basic_usage.py`` + - ``select_to_arrow()`` walkthrough covering native Arrow, pandas, polars, and Parquet exports. 
+ Loaders ------- @@ -144,5 +155,6 @@ Shared Utilities frameworks/starlette/aiosqlite_app frameworks/flask/sqlite_app patterns/configs/multi_adapter_registry - query_stack_example + patterns/stacks/query_stack_example + arrow/arrow_basic_usage README diff --git a/docs/examples/patterns/stacks/__init__.py b/docs/examples/patterns/stacks/__init__.py new file mode 100644 index 000000000..e075a3c4b --- /dev/null +++ b/docs/examples/patterns/stacks/__init__.py @@ -0,0 +1,3 @@ +"""Statement stack examples for SQLSpec documentation.""" + +__all__ = () diff --git a/docs/examples/patterns/stacks/query_stack_example.py b/docs/examples/patterns/stacks/query_stack_example.py new file mode 100644 index 000000000..bd1215640 --- /dev/null +++ b/docs/examples/patterns/stacks/query_stack_example.py @@ -0,0 +1,106 @@ +"""Demonstrate StatementStack usage across sync and async SQLite adapters.""" + +import asyncio +from typing import Any + +from sqlspec import SQLSpec +from sqlspec.adapters.aiosqlite import AiosqliteConfig +from sqlspec.adapters.sqlite import SqliteConfig +from sqlspec.core import StatementStack + +__all__ = ("build_stack", "main", "run_async_example", "run_sync_example") + +SCHEMA_SCRIPT = """ +CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, last_action TEXT); +CREATE TABLE IF NOT EXISTS audit_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + action TEXT NOT NULL +); +CREATE TABLE IF NOT EXISTS user_roles ( + user_id INTEGER NOT NULL, + role TEXT NOT NULL +); +""" + + +def build_stack(user_id: int, action: str) -> "StatementStack": + """Add audit, update, and select operations to the stack.""" + return ( + StatementStack() + .push_execute( + "INSERT INTO audit_log (user_id, action) VALUES (:user_id, :action)", {"user_id": user_id, "action": action} + ) + .push_execute( + "UPDATE users SET last_action = :action WHERE id = :user_id", {"action": action, "user_id": user_id} + ) + .push_execute("SELECT role FROM user_roles WHERE user_id = :user_id ORDER BY role", {"user_id": user_id}) + ) + + +def _seed_sync_tables(session: "Any", user_id: int, roles: "tuple[str, ...]") -> None: + """Create tables and seed sync demo data.""" + session.execute_script(SCHEMA_SCRIPT) + session.execute( + "INSERT INTO users (id, last_action) VALUES (:user_id, :action)", {"user_id": user_id, "action": "start"} + ) + session.execute_many( + "INSERT INTO user_roles (user_id, role) VALUES (:user_id, :role)", + [{"user_id": user_id, "role": role} for role in roles], + ) + + +async def _seed_async_tables(session: "Any", user_id: int, roles: "tuple[str, ...]") -> None: + """Create tables and seed async demo data.""" + await session.execute_script(SCHEMA_SCRIPT) + await session.execute( + "INSERT INTO users (id, last_action) VALUES (:user_id, :action)", {"user_id": user_id, "action": "start"} + ) + await session.execute_many( + "INSERT INTO user_roles (user_id, role) VALUES (:user_id, :role)", + [{"user_id": user_id, "role": role} for role in roles], + ) + + +def run_sync_example() -> None: + """Execute the stack with the synchronous SQLite adapter.""" + registry = SQLSpec() + config = registry.add_config(SqliteConfig(pool_config={"database": ":memory:"})) + with registry.provide_session(config) as session: + _seed_sync_tables(session, 1, ("admin", "editor")) + results = session.execute_stack(build_stack(user_id=1, action="sync-login")) + audit_insert, user_update, role_select = results + print("[sync] rows inserted:", audit_insert.rowcount) + print("[sync] rows updated:", 
user_update.rowcount) + if role_select.raw_result is not None: + roles = [row["role"] for row in role_select.raw_result.data] + print("[sync] roles:", roles) + + +def run_async_example() -> None: + """Execute the stack with the asynchronous AioSQLite adapter.""" + + async def _inner() -> None: + registry = SQLSpec() + config = registry.add_config(AiosqliteConfig(pool_config={"database": ":memory:"})) + async with registry.provide_session(config) as session: + await _seed_async_tables(session, 2, ("viewer",)) + results = await session.execute_stack(build_stack(user_id=2, action="async-login")) + audit_insert, user_update, role_select = results + print("[async] rows inserted:", audit_insert.rowcount) + print("[async] rows updated:", user_update.rowcount) + if role_select.raw_result is not None: + roles = [row["role"] for row in role_select.raw_result.data] + print("[async] roles:", roles) + + asyncio.run(_inner()) + + +def main() -> None: + """Run both sync and async StatementStack demonstrations.""" + run_sync_example() + run_async_example() + + +if __name__ == "__main__": + main() diff --git a/docs/examples/query_stack_example.rst b/docs/examples/patterns/stacks/query_stack_example.rst similarity index 79% rename from docs/examples/query_stack_example.rst rename to docs/examples/patterns/stacks/query_stack_example.rst index 8fce8468d..4f6228a65 100644 --- a/docs/examples/query_stack_example.rst +++ b/docs/examples/patterns/stacks/query_stack_example.rst @@ -10,13 +10,13 @@ This example builds an immutable ``StatementStack`` and executes it against both .. literalinclude:: query_stack_example.py :language: python - :caption: ``docs/examples/query_stack_example.py`` + :caption: ``docs/examples/patterns/stacks/query_stack_example.py`` :linenos: Run the script: .. code-block:: console - uv run python docs/examples/query_stack_example.py + uv run python docs/examples/patterns/stacks/query_stack_example.py Expected output shows inserted/updated row counts plus the projected role list for each adapter. 
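As a rough illustration (not captured output, but derived from the example's seed data: user 1 holds the ``admin`` and ``editor`` roles, user 2 holds ``viewer``), a successful run should print something like:

    [sync] rows inserted: 1
    [sync] rows updated: 1
    [sync] roles: ['admin', 'editor']
    [async] rows inserted: 1
    [async] rows updated: 1
    [async] roles: ['viewer']
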
diff --git a/docs/examples/query_stack_example.py b/docs/examples/query_stack_example.py deleted file mode 100644 index b4b9b7906..000000000 --- a/docs/examples/query_stack_example.py +++ /dev/null @@ -1,98 +0,0 @@ -import asyncio - -from sqlspec import SQLSpec -from sqlspec.adapters.aiosqlite import AiosqliteConfig -from sqlspec.adapters.sqlite import SqliteConfig -from sqlspec.core import StatementStack - - -def build_stack(user_id: int, action: str) -> "StatementStack": - stack = ( - StatementStack() - .push_execute( - "INSERT INTO audit_log (user_id, action) VALUES (:user_id, :action)", {"user_id": user_id, "action": action} - ) - .push_execute( - "UPDATE users SET last_action = :action WHERE id = :user_id", {"action": action, "user_id": user_id} - ) - .push_execute("SELECT role FROM user_roles WHERE user_id = :user_id ORDER BY role", {"user_id": user_id}) - ) - return stack - - -def run_sync_example() -> None: - sql = SQLSpec() - config = SqliteConfig(pool_config={"database": ":memory:"}) - registry = sql.add_config(config) - - with sql.provide_session(registry) as session: - session.execute_script( - """ - CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, last_action TEXT); - CREATE TABLE IF NOT EXISTS audit_log ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_id INTEGER NOT NULL, - action TEXT NOT NULL - ); - CREATE TABLE IF NOT EXISTS user_roles ( - user_id INTEGER NOT NULL, - role TEXT NOT NULL - ); - INSERT INTO users (id, last_action) VALUES (1, 'start'); - INSERT INTO user_roles (user_id, role) VALUES (1, 'admin'), (1, 'editor'); - """ - ) - - stack = build_stack(user_id=1, action="sync-login") - results = session.execute_stack(stack) - - audit_insert, user_update, role_select = results - print("[sync] rows inserted:", audit_insert.rowcount) - print("[sync] rows updated:", user_update.rowcount) - if role_select.raw_result is not None: - print("[sync] roles:", [row["role"] for row in role_select.raw_result.data]) - - -def run_async_example() -> None: - async def _inner() -> None: - sql = SQLSpec() - config = AiosqliteConfig(pool_config={"database": ":memory:"}) - registry = sql.add_config(config) - - async with sql.provide_session(registry) as session: - await session.execute_script( - """ - CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, last_action TEXT); - CREATE TABLE IF NOT EXISTS audit_log ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_id INTEGER NOT NULL, - action TEXT NOT NULL - ); - CREATE TABLE IF NOT EXISTS user_roles ( - user_id INTEGER NOT NULL, - role TEXT NOT NULL - ); - INSERT INTO users (id, last_action) VALUES (2, 'start'); - INSERT INTO user_roles (user_id, role) VALUES (2, 'viewer'); - """ - ) - - stack = build_stack(user_id=2, action="async-login") - results = await session.execute_stack(stack, continue_on_error=False) - - audit_insert, user_update, role_select = results - print("[async] rows inserted:", audit_insert.rowcount) - print("[async] rows updated:", user_update.rowcount) - if role_select.raw_result is not None: - print("[async] roles:", [row["role"] for row in role_select.raw_result.data]) - - asyncio.run(_inner()) - - -def main() -> None: - run_sync_example() - run_async_example() - - -if __name__ == "__main__": - main() diff --git a/specs/guides/query-stack.md b/specs/guides/query-stack.md index a5310a7ee..700585475 100644 --- a/specs/guides/query-stack.md +++ b/specs/guides/query-stack.md @@ -61,7 +61,7 @@ Adapters only need to report whether they used a native pipeline; the observer h ## Related Resources - [Query Stack API 
Reference](/reference/query-stack) -- :doc:`/examples/query_stack_example` +- :doc:`/examples/patterns/stacks/query_stack_example` - [Adapter Guides](/guides/adapters/) for native vs. fallback behavior per database -Use the new :doc:`/reference/query-stack` page for low-level API details and :doc:`/examples/query_stack_example` to see the end-to-end workflow. +Use the new :doc:`/reference/query-stack` page for low-level API details and :doc:`/examples/patterns/stacks/query_stack_example` to see the end-to-end workflow. diff --git a/uv.lock b/uv.lock index ae7fbacbc..4913d4a1e 100644 --- a/uv.lock +++ b/uv.lock @@ -123,7 +123,7 @@ wheels = [ [[package]] name = "aiobotocore" -version = "2.25.1" +version = "2.25.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -134,9 +134,9 @@ dependencies = [ { name = "python-dateutil" }, { name = "wrapt" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/62/94/2e4ec48cf1abb89971cb2612d86f979a6240520f0a659b53a43116d344dc/aiobotocore-2.25.1.tar.gz", hash = "sha256:ea9be739bfd7ece8864f072ec99bb9ed5c7e78ebb2b0b15f29781fbe02daedbc", size = 120560, upload-time = "2025-10-28T22:33:21.787Z" } +sdist = { url = "https://files.pythonhosted.org/packages/52/48/cf3c88c5e3fecdeed824f97a8a98a9fc0d7ef33e603f8f22c2fd32b9ef09/aiobotocore-2.25.2.tar.gz", hash = "sha256:ae0a512b34127097910b7af60752956254099ae54402a84c2021830768f92cda", size = 120585, upload-time = "2025-11-11T18:51:28.056Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/95/2a/d275ec4ce5cd0096665043995a7d76f5d0524853c76a3d04656de49f8808/aiobotocore-2.25.1-py3-none-any.whl", hash = "sha256:eb6daebe3cbef5b39a0bb2a97cffbe9c7cb46b2fcc399ad141f369f3c2134b1f", size = 86039, upload-time = "2025-10-28T22:33:19.949Z" }, + { url = "https://files.pythonhosted.org/packages/8e/ad/a2f3964aa37da5a4c94c1e5f3934d6ac1333f991f675fcf08a618397a413/aiobotocore-2.25.2-py3-none-any.whl", hash = "sha256:0cec45c6ba7627dd5e5460337291c86ac38c3b512ec4054ce76407d0f7f2a48f", size = 86048, upload-time = "2025-11-11T18:51:26.139Z" }, ] [[package]] @@ -358,11 +358,11 @@ wheels = [ [[package]] name = "annotated-doc" -version = "0.0.3" +version = "0.0.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d7/a6/dc46877b911e40c00d395771ea710d5e77b6de7bacd5fdcd78d70cc5a48f/annotated_doc-0.0.3.tar.gz", hash = "sha256:e18370014c70187422c33e945053ff4c286f453a984eba84d0dbfa0c935adeda", size = 5535, upload-time = "2025-10-24T14:57:10.718Z" } +sdist = { url = "https://files.pythonhosted.org/packages/57/ba/046ceea27344560984e26a590f90bc7f4a75b06701f653222458922b558c/annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4", size = 7288, upload-time = "2025-11-10T22:07:42.062Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/02/b7/cf592cb5de5cb3bade3357f8d2cf42bf103bbe39f459824b4939fd212911/annotated_doc-0.0.3-py3-none-any.whl", hash = "sha256:348ec6664a76f1fd3be81f43dffbee4c7e8ce931ba71ec67cc7f4ade7fbbb580", size = 5488, upload-time = "2025-10-24T14:57:09.462Z" }, + { url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" }, ] [[package]] @@ -691,16 +691,16 @@ wheels = [ [[package]] name = "botocore" -version = "1.40.61" +version = 
"1.40.70" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jmespath" }, { name = "python-dateutil" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/28/a3/81d3a47c2dbfd76f185d3b894f2ad01a75096c006a2dd91f237dca182188/botocore-1.40.61.tar.gz", hash = "sha256:a2487ad69b090f9cccd64cf07c7021cd80ee9c0655ad974f87045b02f3ef52cd", size = 14393956, upload-time = "2025-10-28T19:26:46.108Z" } +sdist = { url = "https://files.pythonhosted.org/packages/35/c1/8c4c199ae1663feee579a15861e34f10b29da11ae6ea0ad7b6a847ef3823/botocore-1.40.70.tar.gz", hash = "sha256:61b1f2cecd54d1b28a081116fa113b97bf4e17da57c62ae2c2751fe4c528af1f", size = 14444592, upload-time = "2025-11-10T20:29:04.046Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/38/c5/f6ce561004db45f0b847c2cd9b19c67c6bf348a82018a48cb718be6b58b0/botocore-1.40.61-py3-none-any.whl", hash = "sha256:17ebae412692fd4824f99cde0f08d50126dc97954008e5ba2b522eb049238aa7", size = 14055973, upload-time = "2025-10-28T19:26:42.15Z" }, + { url = "https://files.pythonhosted.org/packages/55/d2/507fd0ee4dd574d2bdbdeac5df83f39d2cae1ffe97d4622cca6f6bab39f1/botocore-1.40.70-py3-none-any.whl", hash = "sha256:4a394ad25f5d9f1ef0bed610365744523eeb5c22de6862ab25d8c93f9f6d295c", size = 14106829, upload-time = "2025-11-10T20:29:01.101Z" }, ] [[package]] @@ -775,11 +775,11 @@ wheels = [ [[package]] name = "certifi" -version = "2025.10.5" +version = "2025.11.12" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4c/5b/b6ce21586237c77ce67d01dc5507039d444b630dd76611bbca2d8e5dcd91/certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43", size = 164519, upload-time = "2025-10-05T04:12:15.808Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e4/37/af0d2ef3967ac0d6113837b44a4f0bfe1328c2b9763bd5b1744520e5cfed/certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de", size = 163286, upload-time = "2025-10-05T04:12:14.03Z" }, + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, ] [[package]] @@ -1292,34 +1292,40 @@ wheels = [ [[package]] name = "duckdb" -version = "1.4.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ea/e7/21cf50a3d52ffceee1f0bcc3997fa96a5062e6bab705baee4f6c4e33cce5/duckdb-1.4.1.tar.gz", hash = "sha256:f903882f045d057ebccad12ac69975952832edfe133697694854bb784b8d6c76", size = 18461687, upload-time = "2025-10-07T10:37:28.605Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/68/cc/00a07de0e33d16763edd4132d7c8a2f9efd57a2f296a25a948f239a1fadf/duckdb-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:296b4fff3908fb4c47b0aa1d77bd1933375e75401009d2dc81af8e7a0b8a05b4", size = 29062814, upload-time = "2025-10-07T10:36:14.261Z" }, - { url = 
"https://files.pythonhosted.org/packages/17/ea/fb0fda8886d1928f1b2a53a1163ef94f6f4b41f6d8b29eee457acfc2fa67/duckdb-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0b4182800092115feee5d71a8691efb283d3c9f5eb0b36362b308ef007a12222", size = 16161652, upload-time = "2025-10-07T10:36:17.358Z" }, - { url = "https://files.pythonhosted.org/packages/b4/5f/052e6436a71f461e61cd3a982954c029145a84b58cefa1dfb3eb2d96e4fc/duckdb-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:67cc3b6c7f7ba07a69e9331b8ccea7a60cbcd4204bb473e5da9b71588bd2eca9", size = 13753030, upload-time = "2025-10-07T10:36:19.782Z" }, - { url = "https://files.pythonhosted.org/packages/c2/fd/3ae3c89d0f6ad54c0be4430e572306fbfc9f173c97b23c5025a540449325/duckdb-1.4.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cef0cee7030b561640cb9af718f8841b19cdd2aa020d53561057b5743bea90b", size = 18487683, upload-time = "2025-10-07T10:36:22.375Z" }, - { url = "https://files.pythonhosted.org/packages/d4/3c/eef454cd7c3880c2d55b50e18a9c7a213bf91ded79efcfb573d8d6dd8a47/duckdb-1.4.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2bf93347f37a46bacce6ac859d651dbf5731e2c94a64ab358300425b09e3de23", size = 20487080, upload-time = "2025-10-07T10:36:24.692Z" }, - { url = "https://files.pythonhosted.org/packages/bb/5b/b619f4c986a1cb0b06315239da9ce5fd94a20c07a344d03e2635d56a6967/duckdb-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:2e60d2361f978908a3d96eebaf1f4b346f283afcc467351aae50ea45ca293a2b", size = 12324436, upload-time = "2025-10-07T10:36:27.458Z" }, - { url = "https://files.pythonhosted.org/packages/d9/52/606f13fa9669a24166d2fe523e28982d8ef9039874b4de774255c7806d1f/duckdb-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:605d563c1d5203ca992497cd33fb386ac3d533deca970f9dcf539f62a34e22a9", size = 29065894, upload-time = "2025-10-07T10:36:29.837Z" }, - { url = "https://files.pythonhosted.org/packages/84/57/138241952ece868b9577e607858466315bed1739e1fbb47205df4dfdfd88/duckdb-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d3305c7c4b70336171de7adfdb50431f23671c000f11839b580c4201d9ce6ef5", size = 16163720, upload-time = "2025-10-07T10:36:32.241Z" }, - { url = "https://files.pythonhosted.org/packages/a3/81/afa3a0a78498a6f4acfea75c48a70c5082032d9ac87822713d7c2d164af1/duckdb-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a063d6febbe34b32f1ad2e68822db4d0e4b1102036f49aaeeb22b844427a75df", size = 13756223, upload-time = "2025-10-07T10:36:34.673Z" }, - { url = "https://files.pythonhosted.org/packages/47/dd/5f6064fbd9248e37a3e806a244f81e0390ab8f989d231b584fb954f257fc/duckdb-1.4.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1ffcaaf74f7d1df3684b54685cbf8d3ce732781c541def8e1ced304859733ae", size = 18487022, upload-time = "2025-10-07T10:36:36.759Z" }, - { url = "https://files.pythonhosted.org/packages/a1/10/b54969a1c42fd9344ad39228d671faceb8aa9f144b67cd9531a63551757f/duckdb-1.4.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:685d3d1599dc08160e0fa0cf09e93ac4ff8b8ed399cb69f8b5391cd46b5b207c", size = 20491004, upload-time = "2025-10-07T10:36:39.318Z" }, - { url = "https://files.pythonhosted.org/packages/ed/d5/7332ae8f804869a4e895937821b776199a283f8d9fc775fd3ae5a0558099/duckdb-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:78f1d28a15ae73bd449c43f80233732adffa49be1840a32de8f1a6bb5b286764", size = 12327619, upload-time = "2025-10-07T10:36:41.509Z" }, - { url = 
"https://files.pythonhosted.org/packages/0e/6c/906a3fe41cd247b5638866fc1245226b528de196588802d4df4df1e6e819/duckdb-1.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:cd1765a7d180b7482874586859fc23bc9969d7d6c96ced83b245e6c6f49cde7f", size = 29076820, upload-time = "2025-10-07T10:36:43.782Z" }, - { url = "https://files.pythonhosted.org/packages/66/c7/01dd33083f01f618c2a29f6dd068baf16945b8cbdb132929d3766610bbbb/duckdb-1.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8ed7a86725185470953410823762956606693c0813bb64e09c7d44dbd9253a64", size = 16167558, upload-time = "2025-10-07T10:36:46.003Z" }, - { url = "https://files.pythonhosted.org/packages/81/e2/f983b4b7ae1dfbdd2792dd31dee9a0d35f88554452cbfc6c9d65e22fdfa9/duckdb-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8a189bdfc64cfb9cc1adfbe4f2dcfde0a4992ec08505ad8ce33c886e4813f0bf", size = 13762226, upload-time = "2025-10-07T10:36:48.55Z" }, - { url = "https://files.pythonhosted.org/packages/ed/34/fb69a7be19b90f573b3cc890961be7b11870b77514769655657514f10a98/duckdb-1.4.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a9090089b6486f7319c92acdeed8acda022d4374032d78a465956f50fc52fabf", size = 18500901, upload-time = "2025-10-07T10:36:52.445Z" }, - { url = "https://files.pythonhosted.org/packages/e4/a5/1395d7b49d5589e85da9a9d7ffd8b50364c9d159c2807bef72d547f0ad1e/duckdb-1.4.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:142552ea3e768048e0e8c832077a545ca07792631c59edaee925e3e67401c2a0", size = 20514177, upload-time = "2025-10-07T10:36:55.358Z" }, - { url = "https://files.pythonhosted.org/packages/c0/21/08f10706d30252753349ec545833fc0cea67c11abd0b5223acf2827f1056/duckdb-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:567f3b3a785a9e8650612461893c49ca799661d2345a6024dda48324ece89ded", size = 12336422, upload-time = "2025-10-07T10:36:57.521Z" }, - { url = "https://files.pythonhosted.org/packages/d7/08/705988c33e38665c969f7876b3ca4328be578554aa7e3dc0f34158da3e64/duckdb-1.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:46496a2518752ae0c6c5d75d4cdecf56ea23dd098746391176dd8e42cf157791", size = 29077070, upload-time = "2025-10-07T10:36:59.83Z" }, - { url = "https://files.pythonhosted.org/packages/99/c5/7c9165f1e6b9069441bcda4da1e19382d4a2357783d37ff9ae238c5c41ac/duckdb-1.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1c65ae7e9b541cea07d8075343bcfebdecc29a3c0481aa6078ee63d51951cfcd", size = 16167506, upload-time = "2025-10-07T10:37:02.24Z" }, - { url = "https://files.pythonhosted.org/packages/38/46/267f4a570a0ee3ae6871ddc03435f9942884284e22a7ba9b7cb252ee69b6/duckdb-1.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:598d1a314e34b65d9399ddd066ccce1eeab6a60a2ef5885a84ce5ed62dbaf729", size = 13762330, upload-time = "2025-10-07T10:37:04.581Z" }, - { url = "https://files.pythonhosted.org/packages/15/7b/c4f272a40c36d82df20937d93a1780eb39ab0107fe42b62cba889151eab9/duckdb-1.4.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2f16b8def782d484a9f035fc422bb6f06941ed0054b4511ddcdc514a7fb6a75", size = 18504687, upload-time = "2025-10-07T10:37:06.991Z" }, - { url = "https://files.pythonhosted.org/packages/17/fc/9b958751f0116d7b0406406b07fa6f5a10c22d699be27826d0b896f9bf51/duckdb-1.4.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a5a7d0aed068a5c33622a8848857947cab5cfb3f2a315b1251849bac2c74c492", size = 20513823, upload-time = "2025-10-07T10:37:09.349Z" }, - { url = 
"https://files.pythonhosted.org/packages/30/79/4f544d73fcc0513b71296cb3ebb28a227d22e80dec27204977039b9fa875/duckdb-1.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:280fd663dacdd12bb3c3bf41f3e5b2e5b95e00b88120afabb8b8befa5f335c6f", size = 12336460, upload-time = "2025-10-07T10:37:12.154Z" }, +version = "1.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/81/99/ac6c105118751cc3ccae980b12e44847273f3402e647ec3197aff2251e23/duckdb-1.4.2.tar.gz", hash = "sha256:df81acee3b15ecb2c72eb8f8579fb5922f6f56c71f5c8892ea3bc6fab39aa2c4", size = 18469786, upload-time = "2025-11-12T13:18:04.203Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/de/db/de454dea845f263fe42fa176c12ba9befe86a87514a2e5a48494a8ca5003/duckdb-1.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:85f0c36c1b5f378d96dd7d8c6d312317f4f547a567e8b76cacb2590a31d931f3", size = 28999618, upload-time = "2025-11-12T13:16:29.558Z" }, + { url = "https://files.pythonhosted.org/packages/1a/39/644e8b130058188a15d4e5f2b955306ee486f3843d8479da1c846a85342f/duckdb-1.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:125cd89dbfd40846f216032b11e5eeaf2be13ee4d6745b82413ddd213ddc4d99", size = 15396589, upload-time = "2025-11-12T13:16:32.161Z" }, + { url = "https://files.pythonhosted.org/packages/50/f6/11446807f06dd65227f9817e04c01309ec8009b7fe6f0cf3fc0d7f6c7ea2/duckdb-1.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c80934cb15879844a752776a1ea3d1405635f307f5bb8b87c99f5a5564d33a", size = 13726628, upload-time = "2025-11-12T13:16:34.316Z" }, + { url = "https://files.pythonhosted.org/packages/a0/2c/6b2cf2d9df3776accb25ac375759c1d571fd730f216017c52cb5d4deffd6/duckdb-1.4.2-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2d3c39429b3ce1ee33d86daa94bed75a1f5b0fcf4d66d0839a6fcee398894548", size = 18455943, upload-time = "2025-11-12T13:16:36.967Z" }, + { url = "https://files.pythonhosted.org/packages/a6/b4/f213b764bd7f2c99aab20d25e4aaeda9ce54e1dc09b326c4da5a4fbe6bfd/duckdb-1.4.2-cp310-cp310-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4da7aafa94800f475d287814ad91993cf1f912c16f76ff4b411769da40c4b7da", size = 20454873, upload-time = "2025-11-12T13:16:39.801Z" }, + { url = "https://files.pythonhosted.org/packages/db/0d/5ae694d1779ec06beff624a5f59190c2f140e753cbdba0f5d0c7f3d44e37/duckdb-1.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:c45e0e682ee9073c36dc34d7ad8033210bfea0cab80cc98d1eca516227b35fdf", size = 12320762, upload-time = "2025-11-12T13:16:42.085Z" }, + { url = "https://files.pythonhosted.org/packages/1a/76/5b79eac0abcb239806da1d26f20515882a8392d0729a031af9e61d494dd4/duckdb-1.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b2d882672b61bc6117a2c524cf64ea519d2e829295951d214f04e126f1549b09", size = 29005908, upload-time = "2025-11-12T13:16:44.454Z" }, + { url = "https://files.pythonhosted.org/packages/73/1a/324d7787fdb0de96872ff7b48524830930494b45abf9501875be7456faa2/duckdb-1.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:995ec9c1fc3ce5fbfe5950b980ede2a9d51b35fdf2e3f873ce94c22fc3355fdc", size = 15398994, upload-time = "2025-11-12T13:16:46.802Z" }, + { url = "https://files.pythonhosted.org/packages/ad/c6/a2a072ca73f91a32c0db1254dd84fec30f4d673f9d57d853802aedf867fa/duckdb-1.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:19d2c2f3cdf0242cad42e803602bbc2636706fc1d2d260ffac815ea2e3a018e8", size = 13727492, upload-time = "2025-11-12T13:16:49.097Z" }, + { url = 
"https://files.pythonhosted.org/packages/d6/d5/8f84b3685a8730f47e68bce46dbce789cb85c915a8c6aafdf85830589eb3/duckdb-1.4.2-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7a496a04458590dcec8e928122ebe2ecbb42c3e1de4119f5461f7bf547acbe79", size = 18456479, upload-time = "2025-11-12T13:16:51.66Z" }, + { url = "https://files.pythonhosted.org/packages/30/7c/709a80e72a3bf013fa890fc767d2959a8a2a15abee4088559ddabcb9399f/duckdb-1.4.2-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0c2315b693f201787c9892f31eb9a0484d3c648edb3578a86dc8c1284dd2873a", size = 20458319, upload-time = "2025-11-12T13:16:54.24Z" }, + { url = "https://files.pythonhosted.org/packages/93/ff/e0b0dd10e6da48a262f3e054378a3781febf28af3381c0e1e901d0390b3c/duckdb-1.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:bdd2d808806ceeeec33ba89665a0bb707af8815f2ca40e6c4c581966c0628ba1", size = 12320864, upload-time = "2025-11-12T13:16:56.798Z" }, + { url = "https://files.pythonhosted.org/packages/c9/29/2f68c57e7c4242fedbf4b3fdc24fce2ffcf60640c936621d8a645593a161/duckdb-1.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9356fe17af2711e0a5ace4b20a0373e03163545fd7516e0c3c40428f44597052", size = 29015814, upload-time = "2025-11-12T13:16:59.329Z" }, + { url = "https://files.pythonhosted.org/packages/34/b7/030cc278a4ae788800a833b2901b9a7da7a6993121053c4155c359328531/duckdb-1.4.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:946a8374c0252db3fa41165ab9952b48adc8de06561a6b5fd62025ac700e492f", size = 15403892, upload-time = "2025-11-12T13:17:02.141Z" }, + { url = "https://files.pythonhosted.org/packages/f7/a2/67f4798a7a29bd0813f8a1e94a83e857e57f5d1ba14cf3edc5551aad0095/duckdb-1.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:389fa9abe4ca37d091332a2f8c3ebd713f18e87dc4cb5e8efd3e5aa8ddf8885f", size = 13733622, upload-time = "2025-11-12T13:17:04.502Z" }, + { url = "https://files.pythonhosted.org/packages/6e/ac/d0d0e3feae9663334b2336f15785d280b54a56c3ffa10334e20a51a87ecd/duckdb-1.4.2-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7be8c0c40f2264b91500b89c688f743e1c7764966e988f680b1f19416b00052e", size = 18470220, upload-time = "2025-11-12T13:17:07.049Z" }, + { url = "https://files.pythonhosted.org/packages/a5/52/7570a50430cbffc8bd702443ac28a446b0fa4f77747a3821d4b37a852b15/duckdb-1.4.2-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c6a21732dd52a76f1e61484c06d65800b18f57fe29e8102a7466c201a2221604", size = 20481138, upload-time = "2025-11-12T13:17:09.459Z" }, + { url = "https://files.pythonhosted.org/packages/95/5e/be05f46a290ea27630c112ff9e01fd01f585e599967fc52fe2edc7bc2039/duckdb-1.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:769440f4507c20542ae2e5b87f6c6c6d3f148c0aa8f912528f6c97e9aedf6a21", size = 12330737, upload-time = "2025-11-12T13:17:12.02Z" }, + { url = "https://files.pythonhosted.org/packages/70/c4/5054dbe79cf570b0c97db0c2eba7eb541cc561037360479059a3b57e4a32/duckdb-1.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:de646227fc2c53101ac84e86e444e7561aa077387aca8b37052f3803ee690a17", size = 29015784, upload-time = "2025-11-12T13:17:14.409Z" }, + { url = "https://files.pythonhosted.org/packages/2c/b8/97f4f07d9459f5d262751cccfb2f4256debb8fe5ca92370cebe21aab1ee2/duckdb-1.4.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f1fac31babda2045d4cdefe6d0fd2ebdd8d4c2a333fbcc11607cfeaec202d18d", size = 15403788, upload-time = "2025-11-12T13:17:16.864Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/ea/112f33ace03682bafd4aaf0a3336da689b9834663e7032b3d678fd2902c9/duckdb-1.4.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:43ac632f40ab1aede9b4ce3c09ea043f26f3db97b83c07c632c84ebd7f7c0f4a", size = 13733603, upload-time = "2025-11-12T13:17:20.884Z" }, + { url = "https://files.pythonhosted.org/packages/34/83/8d6f845a9a946e8b47b6253b9edb084c45670763e815feed6cfefc957e89/duckdb-1.4.2-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77db030b48321bf785767b7b1800bf657dd2584f6df0a77e05201ecd22017da2", size = 18473725, upload-time = "2025-11-12T13:17:23.074Z" }, + { url = "https://files.pythonhosted.org/packages/82/29/153d1b4fc14c68e6766d7712d35a7ab6272a801c52160126ac7df681f758/duckdb-1.4.2-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a456adbc3459c9dcd99052fad20bd5f8ef642be5b04d09590376b2eb3eb84f5c", size = 20481971, upload-time = "2025-11-12T13:17:26.703Z" }, + { url = "https://files.pythonhosted.org/packages/58/b7/8d3a58b5ebfb9e79ed4030a0f2fbd7e404c52602e977b1e7ab51651816c7/duckdb-1.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:2f7c61617d2b1da3da5d7e215be616ad45aa3221c4b9e2c4d1c28ed09bc3c1c4", size = 12330535, upload-time = "2025-11-12T13:17:29.175Z" }, + { url = "https://files.pythonhosted.org/packages/25/46/0f316e4d0d6bada350b9da06691a2537c329c8948c78e8b5e0c4874bc5e2/duckdb-1.4.2-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:422be8c6bdc98366c97f464b204b81b892bf962abeae6b0184104b8233da4f19", size = 29028616, upload-time = "2025-11-12T13:17:31.599Z" }, + { url = "https://files.pythonhosted.org/packages/82/ab/e04a8f97865251b544aee9501088d4f0cb8e8b37339bd465c0d33857d411/duckdb-1.4.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:459b1855bd06a226a2838da4f14c8863fd87a62e63d414a7f7f682a7c616511a", size = 15410382, upload-time = "2025-11-12T13:17:34.14Z" }, + { url = "https://files.pythonhosted.org/packages/47/ec/b8229517c2f9fe88a38bb1a172a2da4d0ff34996d319d74554fda80b6358/duckdb-1.4.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20c45b4ead1ea4d23a1be1cd4f1dfc635e58b55f0dd11e38781369be6c549903", size = 13737588, upload-time = "2025-11-12T13:17:36.515Z" }, + { url = "https://files.pythonhosted.org/packages/f2/9a/63d26da9011890a5b893e0c21845c0c0b43c634bf263af3bbca64be0db76/duckdb-1.4.2-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2e552451054534970dc999e69ca5ae5c606458548c43fb66d772117760485096", size = 18477886, upload-time = "2025-11-12T13:17:39.136Z" }, + { url = "https://files.pythonhosted.org/packages/23/35/b1fae4c5245697837f6f63e407fa81e7ccc7948f6ef2b124cd38736f4d1d/duckdb-1.4.2-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:128c97dab574a438d7c8d020670b21c68792267d88e65a7773667b556541fa9b", size = 20483292, upload-time = "2025-11-12T13:17:41.501Z" }, + { url = "https://files.pythonhosted.org/packages/25/5e/6f5ebaabc12c6db62f471f86b5c9c8debd57f11aa1b2acbbcc4c68683238/duckdb-1.4.2-cp314-cp314-win_amd64.whl", hash = "sha256:dfcc56a83420c0dec0b83e97a6b33addac1b7554b8828894f9d203955591218c", size = 12830520, upload-time = "2025-11-12T13:17:43.93Z" }, ] [[package]] @@ -1336,23 +1342,23 @@ wheels = [ [[package]] name = "execnet" -version = "2.1.1" +version = "2.1.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = 
"sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524, upload-time = "2024-04-08T09:04:19.245Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bf/89/780e11f9588d9e7128a3f87788354c7946a9cbb1401ad38a48c4db9a4f07/execnet-2.1.2.tar.gz", hash = "sha256:63d83bfdd9a23e35b9c6a3261412324f964c2ec8dcd8d3c6916ee9373e0befcd", size = 166622, upload-time = "2025-11-12T09:56:37.75Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612, upload-time = "2024-04-08T09:04:17.414Z" }, + { url = "https://files.pythonhosted.org/packages/ab/84/02fc1827e8cdded4aa65baef11296a9bbe595c474f0d6d758af082d849fd/execnet-2.1.2-py3-none-any.whl", hash = "sha256:67fba928dd5a544b783f6056f449e5e3931a5c378b128bc18501f7ea79e296ec", size = 40708, upload-time = "2025-11-12T09:56:36.333Z" }, ] [[package]] name = "faker" -version = "37.12.0" +version = "38.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "tzdata" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3d/84/e95acaa848b855e15c83331d0401ee5f84b2f60889255c2e055cb4fb6bdf/faker-37.12.0.tar.gz", hash = "sha256:7505e59a7e02fa9010f06c3e1e92f8250d4cfbb30632296140c2d6dbef09b0fa", size = 1935741, upload-time = "2025-10-24T15:19:58.764Z" } +sdist = { url = "https://files.pythonhosted.org/packages/04/05/206c151fe8ca9c8e46963d6c8b6e2e281f272009dad30fe3792005393a5e/faker-38.0.0.tar.gz", hash = "sha256:797aa03fa86982dfb6206918acc10ebf3655bdaa89ddfd3e668d7cc69537331a", size = 1935705, upload-time = "2025-11-12T01:47:39.586Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8e/98/2c050dec90e295a524c9b65c4cb9e7c302386a296b2938710448cbd267d5/faker-37.12.0-py3-none-any.whl", hash = "sha256:afe7ccc038da92f2fbae30d8e16d19d91e92e242f8401ce9caf44de892bab4c4", size = 1975461, upload-time = "2025-10-24T15:19:55.739Z" }, + { url = "https://files.pythonhosted.org/packages/4d/1e/e6d1940d2c2617d7e6a0a3fdd90e506ff141715cdc4c3ecd7217d937e656/faker-38.0.0-py3-none-any.whl", hash = "sha256:ad4ea6fbfaac2a75d92943e6a79c81f38ecff92378f6541dea9a677ec789a5b2", size = 1975561, upload-time = "2025-11-12T01:47:36.672Z" }, ] [[package]] @@ -1737,7 +1743,7 @@ wheels = [ [[package]] name = "google-cloud-aiplatform" -version = "1.126.1" +version = "1.127.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "docstring-parser" }, @@ -1754,9 +1760,9 @@ dependencies = [ { name = "shapely" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0c/36/f8e41679e6cb7ea6b50c0bfeaea0b9daf1475cafa152ad30456f6ec5471f/google_cloud_aiplatform-1.126.1.tar.gz", hash = "sha256:956706c587b817e36d5a16af5ab7f48c73dde76c71d660ecd4284f0339dc37d4", size = 9777644, upload-time = "2025-11-06T22:00:52.894Z" } +sdist = { url = "https://files.pythonhosted.org/packages/27/c0/ad6cd8c574256b8efd90e7d987b0bb529f212fa1d242005cf9c65d37ea6e/google_cloud_aiplatform-1.127.0.tar.gz", hash = "sha256:206f80aaafeff5e56c059bb71bedafe1ef47cc6cee05fe81c344ff7998f5f921", size = 9777006, upload-time = "2025-11-12T13:44:52.779Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b2/c6/3dc21f6182703d170624ed9f87894d35e1d51d1facbb471aa62cc255f233/google_cloud_aiplatform-1.126.1-py2.py3-none-any.whl", hash = "sha256:66d4daea95356d772ff026f13448ea80aa763dfd8daedc21d9ca36d0a1ee8a65", 
size = 8123682, upload-time = "2025-11-06T22:00:49.874Z" }, + { url = "https://files.pythonhosted.org/packages/9e/bc/105e95a8fab5fd3a5bc503a80065bc32aac21746fc774a318b480f137874/google_cloud_aiplatform-1.127.0-py2.py3-none-any.whl", hash = "sha256:66ea52747a97561247dd59adb6311d6d897b5851108a0c186a0194d921a37bde", size = 8124037, upload-time = "2025-11-12T13:44:50.045Z" }, ] [package.optional-dependencies] @@ -1792,7 +1798,7 @@ wheels = [ [[package]] name = "google-cloud-alloydb-connector" -version = "1.9.1" +version = "1.10.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiofiles" }, @@ -1803,9 +1809,9 @@ dependencies = [ { name = "protobuf" }, { name = "requests" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6f/03/d94089a6bc8f22abfb6d9396fe2636f47c91dc8a3a718f223444c3740c9c/google_cloud_alloydb_connector-1.9.1.tar.gz", hash = "sha256:1f50794f428d6f5da09c874fe209120e2a7247d618c3e9a4584eb84307f3c138", size = 36069, upload-time = "2025-09-08T17:26:13.611Z" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/a2/db2c04cf5c26095fb94c4390067f5405a3e8ce7edb10760f5e327f53cb22/google_cloud_alloydb_connector-1.10.0.tar.gz", hash = "sha256:1ab465a12d7b8fd28fd85cd11e64d8c91334087da9e6410c6dfddb04dc85cb90", size = 36040, upload-time = "2025-11-12T13:53:05.883Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0b/73/c43854d8f8103b175fb2689a6cf755771823557d524831f88e372d91924b/google_cloud_alloydb_connector-1.9.1-py3-none-any.whl", hash = "sha256:d4da1722321279e4ecff29e37d9d3e42367e8ddb71e9f2d756eb5aa8ccbafcf1", size = 45840, upload-time = "2025-09-08T17:26:12.667Z" }, + { url = "https://files.pythonhosted.org/packages/f8/3d/4d480da18b902bcdde3470b4d0f9dacb5bfeb870d1636623740276c1c678/google_cloud_alloydb_connector-1.10.0-py3-none-any.whl", hash = "sha256:81e5c52b606b00357fce3381f2340c2f40f3284c19c5db8dbed6007428f95760", size = 45847, upload-time = "2025-11-12T13:53:04.632Z" }, ] [[package]] @@ -2075,7 +2081,7 @@ wheels = [ [[package]] name = "google-genai" -version = "1.49.0" +version = "1.50.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -2087,9 +2093,9 @@ dependencies = [ { name = "typing-extensions" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/82/49/1a724ee3c3748fa50721d53a52d9fee88c67d0c43bb16eb2b10ee89ab239/google_genai-1.49.0.tar.gz", hash = "sha256:35eb16023b72e298571ae30e919c810694f258f2ba68fc77a2185c7c8829ad5a", size = 253493, upload-time = "2025-11-05T22:41:03.278Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5f/7b/0d0c8f3a52cfda38064e650f7d2c02a7108d3a34d161bd5191069f909cf1/google_genai-1.50.0.tar.gz", hash = "sha256:b1ee723b3491977166cf268e6fb44e5dc430fbbd3c45011e752826a4ffdf2066", size = 254654, upload-time = "2025-11-12T22:45:21.964Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/d3/84a152746dc7bdebb8ba0fd7d6157263044acd1d14b2a53e8df4a307b6b7/google_genai-1.49.0-py3-none-any.whl", hash = "sha256:ad49cd5be5b63397069e7aef9a4fe0a84cbdf25fcd93408e795292308db4ef32", size = 256098, upload-time = "2025-11-05T22:41:01.429Z" }, + { url = "https://files.pythonhosted.org/packages/9f/0c/959a1343003bbbb50b20541304c5eee5564225182c285aab3e0d09f24db0/google_genai-1.50.0-py3-none-any.whl", hash = "sha256:adfb8ab3fca612693c1778267649d955757f95a7a1bf97e781802ab3b5b993a0", size = 257311, upload-time = "2025-11-12T22:45:20.731Z" }, ] [[package]] @@ -3499,39 +3505,39 @@ wheels = [ [[package]] name = "oracledb" 
-version = "3.4.0" +version = "3.4.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cryptography" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/8d/24/47601e8c2c80b577ad62a05b1e904670116845b5e013591aca05ad973309/oracledb-3.4.0.tar.gz", hash = "sha256:3196f0b9d3475313e832d4fd944ab21f7ebdf596d9abd7efd2b2f7e208538150", size = 851221, upload-time = "2025-10-07T04:15:36.28Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ea/ac/1315ecabc52ef5c08860e8f7eebd0496748a7ad490f34476e9a6eaa9277b/oracledb-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:90e5036599264837b9738202e50b4d6e0a16512fbdd0a8d7bdd18f44c4ab9e4a", size = 4425597, upload-time = "2025-10-07T04:15:47.242Z" }, - { url = "https://files.pythonhosted.org/packages/bd/5e/7a7abac9b3fe1cea84ed13df8e0558a6285de7aa9295b6fda1ab338f7cb2/oracledb-3.4.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9517bc386edf91f311023f72ac02a55a69e2c55218f020d6359c3b95d5bf7db", size = 2523648, upload-time = "2025-10-07T04:15:49.371Z" }, - { url = "https://files.pythonhosted.org/packages/6e/2f/3d1e8363032fcf4d0364b2523ea0477d902c583fe8cda716cb109908be9f/oracledb-3.4.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10c3778c7994809fbb05d27b36f5579d7837a1961cc034cedb6c4808222c4435", size = 2701596, upload-time = "2025-10-07T04:15:51.539Z" }, - { url = "https://files.pythonhosted.org/packages/00/cd/d5e6f2d24c78ce0fe0927c185334def7030ead903b314be8155cb910cafb/oracledb-3.4.0-cp310-cp310-win32.whl", hash = "sha256:2d43234f26a5928390cd9c83923054cf442875bd34f2b9b9b2432427de15a037", size = 1555277, upload-time = "2025-10-07T04:15:54.107Z" }, - { url = "https://files.pythonhosted.org/packages/e2/da/247fea207225e6b1fca6e74577b6748c944bb69b88884af44bf6b743f8d8/oracledb-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8687750374a947c12b05ffa2e7788fe93bb8cbf16cb1f231578381f47b976aa", size = 1907401, upload-time = "2025-10-07T04:15:56.043Z" }, - { url = "https://files.pythonhosted.org/packages/b5/f7/45b7be483b100d1d3b0f8620a1073b098b1d5eb00b38dd4526516b8e537d/oracledb-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ea8d5b548657cf89fb3b9a071a87726a755d5546eb452365d31d3cdb6814d56b", size = 4483773, upload-time = "2025-10-07T04:15:59.519Z" }, - { url = "https://files.pythonhosted.org/packages/d6/c9/5ff47cef222260eb07f9d24fdf617fd9031eb12178fe7494d48528e28784/oracledb-3.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a8b260a495472212025409788b4f470d15590b0912e2912e2c6019fbda92aea9", size = 2561595, upload-time = "2025-10-07T04:16:01.376Z" }, - { url = "https://files.pythonhosted.org/packages/12/89/d4f1f925bcf6151f8035e86604df9bd6472fe6a4470064d243d4c6cdf8df/oracledb-3.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:06384289b4c3bb1f6af9c0911e4551fab90d4e8de8d9e8c889b95d9dc90e8db8", size = 2736584, upload-time = "2025-10-07T04:16:03.595Z" }, - { url = "https://files.pythonhosted.org/packages/33/d0/1fcc2f312c8cb5ea130f8915b9782db1b5d2287a624dd8f777c81238a03e/oracledb-3.4.0-cp311-cp311-win32.whl", hash = "sha256:90b0605b8096cfed23006a1825e6c84164f6ebb57d0661ca83ad530a9fca09d1", size = 1553088, upload-time = "2025-10-07T04:16:06.466Z" }, - { url = 
"https://files.pythonhosted.org/packages/eb/38/48a7dc4d8992bd3436d0a95bf85afafd5afd87c2f60a5493fb61f9525d7e/oracledb-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:f400d30e1afc45bc54bde6fde58c5c6dddf9bc65c73e261f2c8a44b36131e627", size = 1913920, upload-time = "2025-10-07T04:16:08.543Z" }, - { url = "https://files.pythonhosted.org/packages/dd/9c/7c7c9be57867842b166935ecf354b290d3b4cd7e6c070f68db3f71d5e0d4/oracledb-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:4613fef1a0ede3c3af8398f5b693e7914e725d1c0fa7ccf03742192d1e496758", size = 4485180, upload-time = "2025-10-07T04:16:11.179Z" }, - { url = "https://files.pythonhosted.org/packages/66/35/e16a31e5f0430c806aac564ebc13ccdae1bfe371b90c877255d0aff21e76/oracledb-3.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:796cfb1ce492523379836bc4880b9665993e5cf5044a0fb55b40ab3f617be983", size = 2373297, upload-time = "2025-10-07T04:16:14.016Z" }, - { url = "https://files.pythonhosted.org/packages/db/9e/10e4f13081e51e7a55b9ddd2e84657ff45576f1062b953125499a11b547e/oracledb-3.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7e59627831df8910a48a1650ef48c3e57a91399c97f13029c632d2ae311b49b3", size = 2569896, upload-time = "2025-10-07T04:16:16.867Z" }, - { url = "https://files.pythonhosted.org/packages/46/61/f2fb338e523fb00e091722954994289565674435bf0b0438671e1e941723/oracledb-3.4.0-cp312-cp312-win32.whl", hash = "sha256:f0f59f15c4dc2a41ae66398c0c6416f053efb1be04309e0534acc9c39c2bbbae", size = 1513408, upload-time = "2025-10-07T04:16:18.882Z" }, - { url = "https://files.pythonhosted.org/packages/7f/74/489d1758a7b13da1049a8c3cd98945ead0a798b66aefb544ec14a9e206ec/oracledb-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:ce9380e757f29d79df6d1c8b4e14d68507d4b1b720c9fd8a9549a0605364a770", size = 1869386, upload-time = "2025-10-07T04:16:20.605Z" }, - { url = "https://files.pythonhosted.org/packages/22/0b/a154fb2d73130afffa617f4bdcd2debf6f2160f529f8573f833ce041e477/oracledb-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:70b5c732832297c2e1b5ea067c79a253edf3c70a0dedd2f8f269231fd0c649a3", size = 4466938, upload-time = "2025-10-07T04:16:23.63Z" }, - { url = "https://files.pythonhosted.org/packages/26/9c/18e48120965870d1b395e50a50872748b5a369f924b10997ea64f069cc58/oracledb-3.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c32e7742cba933ca3271762d9565a0b2fdb8d3b7f03d105401834c7ea25831e", size = 2364723, upload-time = "2025-10-07T04:16:25.719Z" }, - { url = "https://files.pythonhosted.org/packages/25/30/d426824d6f4cbb3609975c8c1beb6c394a47f9e0274306a1a49595599294/oracledb-3.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0b1da9bbd4411bd53ddcfb5ce9a69d791f42f6a6c8cd6665cfc20d1d88497cc7", size = 2559838, upload-time = "2025-10-07T04:16:28.175Z" }, - { url = "https://files.pythonhosted.org/packages/05/05/a4c6881b1d09893e04a12eaff01094aabdf9b0fb6b1cb5fab5aeb1a0f6c5/oracledb-3.4.0-cp313-cp313-win32.whl", hash = "sha256:2038870b19902fd1bf2735905d521bbd3e389298c47c39873d94b410ea61ae51", size = 1516726, upload-time = "2025-10-07T04:16:30.066Z" }, - { url = "https://files.pythonhosted.org/packages/75/73/b102f11ca161963c29a1783a4589cac1b9490c9233327b590a6be1e52a61/oracledb-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:f752823649cc1d27e90a439b823d94b9a5839189597b932b5ffbeeb607177a27", size = 1868572, upload-time = 
"2025-10-07T04:16:31.916Z" }, - { url = "https://files.pythonhosted.org/packages/f0/b4/b6ad31422d01018121eeac961f8af8eb8cf39b7f3c00c3295ffc2c8b8936/oracledb-3.4.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9d842a1c1f8462ca9b5228f79f93cfa7b7f33d202ab642509e7071134e8e12d2", size = 4482933, upload-time = "2025-10-07T04:16:33.99Z" }, - { url = "https://files.pythonhosted.org/packages/50/e0/9b5e359ed800c632cbcf6517f8e345a712e1357bfe67e6d9f864d72bf6ae/oracledb-3.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:746154270932699235229c776ced35e7759d80cf95cba1b326744bebc7ae7f77", size = 2400273, upload-time = "2025-10-07T04:16:35.677Z" }, - { url = "https://files.pythonhosted.org/packages/03/08/057341d84adbe4a8e73b875a9e732a0356fe9602f6dc6923edcc3e3aa509/oracledb-3.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7b312896bafb7f6e0e724b4fc2c28c4df6338302ac0906da05a07db5666e578", size = 2574810, upload-time = "2025-10-07T04:16:37.502Z" }, - { url = "https://files.pythonhosted.org/packages/6c/02/8d110e380cb7656ae5e6b91976595f2a174e3a858b6c7dfed0d795dc68ed/oracledb-3.4.0-cp314-cp314-win32.whl", hash = "sha256:98689c068900c6b276182c2f6181a2a42c905a0b4d7dc42bed05b80d515bf609", size = 1537801, upload-time = "2025-10-07T04:16:39.184Z" }, - { url = "https://files.pythonhosted.org/packages/56/94/679eabc8629caa5b4caa033871b294b9eef8b986d466be2f499c4cdc4bdd/oracledb-3.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:e89031578e08051ce2aa05f7590ca9d3368b0609dba614949fa85cf726482f5d", size = 1901942, upload-time = "2025-10-07T04:16:40.709Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/5e/9d/4e86cd410294ebbb1f90a609aaae61c5fa064a5c10e501de3f4c67664e6c/oracledb-3.4.1.tar.gz", hash = "sha256:f5920df5ac9446579e8409607bba31dc2d23a2286a5b0ea17cb0d78d419392a6", size = 852693, upload-time = "2025-11-12T03:21:36.157Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/70/05645e72a67b45396a248a7949d89c91dc7a1ab5f7cedad110d9804e29d5/oracledb-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dfe18061f064d0455fad10d9301f6f92df9e32d18d75fb32802caf1ced4b304c", size = 4243226, upload-time = "2025-11-12T03:21:41.734Z" }, + { url = "https://files.pythonhosted.org/packages/7e/cc/f3a78ae31f87e41378c7bc60928fa5432d4eba80806cb0086edc11803a22/oracledb-3.4.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:84055d6fd093a4d7b8ed653f433531e4c4cc161f7261d78efd7f6a65a1f19444", size = 2426914, upload-time = "2025-11-12T03:21:43.641Z" }, + { url = "https://files.pythonhosted.org/packages/a6/a6/3d3dabbec2651851f13fdb7c318a3c50780090235d340d851f7cb8deeeec/oracledb-3.4.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9e20b6cd3245e84c30188874c524bb3c67c79b7a04fcb864e6ac39f55eae826", size = 2605903, upload-time = "2025-11-12T03:21:45.378Z" }, + { url = "https://files.pythonhosted.org/packages/ae/59/aa174fc8f5629b890424702edf582a8a635acaa0db1315b16160d703a887/oracledb-3.4.1-cp310-cp310-win32.whl", hash = "sha256:abedb0bf464bcf14d83e245eae000e03cad8ac68c945eb09cc46002d800fbf00", size = 1490352, upload-time = "2025-11-12T03:21:46.732Z" }, + { url = "https://files.pythonhosted.org/packages/8a/1c/9dded6efc747d8980667584c8464295d80d205f8a131e31cacfb274b6ed5/oracledb-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:4ee604bb0f3acb5680782818f973445b8cd168e72a73b5ca2cd9807140afadee", size = 
1837541, upload-time = "2025-11-12T03:21:48.571Z" }, + { url = "https://files.pythonhosted.org/packages/ed/9e/5901349b8797fabc7c6f78230376bfbd5541a847f1eb34be23bfb971add7/oracledb-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:20b268be64994d0f636df9ff7613dcce420133f373d0d7fc84a31dd2f07322c0", size = 4226376, upload-time = "2025-11-12T03:21:49.959Z" }, + { url = "https://files.pythonhosted.org/packages/fc/c0/951d2ab8c04df9da309a82e211d19223a64dbbcfdd79f5f1aba6d8736408/oracledb-3.4.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d493946318d99a0f0e3f01d7c64c08ddae66f0aac735fa23c1eb94949d9db0f5", size = 2422323, upload-time = "2025-11-12T03:21:51.583Z" }, + { url = "https://files.pythonhosted.org/packages/a8/7c/82843dd7e55dec6331c0c7737e32523eb2f6156c6469055e2cb752e848f4/oracledb-3.4.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d64fda2fa5d3e82c58b2c5126ab5511bccb84f8b47eedfe9f17e9c100fe7683", size = 2601267, upload-time = "2025-11-12T03:21:52.978Z" }, + { url = "https://files.pythonhosted.org/packages/27/3f/67b50042f955574fca574a2234ba4af421e9268601bceb49efd9c43c6bc8/oracledb-3.4.1-cp311-cp311-win32.whl", hash = "sha256:cd80aa4c4dec7347c6d2909fbaf7e35a5253341ff2cb6f3782ab7ca712bf0405", size = 1488075, upload-time = "2025-11-12T03:21:54.704Z" }, + { url = "https://files.pythonhosted.org/packages/8d/14/bab071234d61e84c65712902dd0edec825d82b3198ffddc977c9ea9a91f3/oracledb-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:5e01e8696009cec4ebcb9fe678b23b8223595dc186c065899660cac4c1fc189b", size = 1843449, upload-time = "2025-11-12T03:21:56.342Z" }, + { url = "https://files.pythonhosted.org/packages/f7/d9/98367ba2c358de366de70b505531f9717cdfa7e29eff0c9ad113eecfce96/oracledb-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1c3f92c023ef1983e0e7f9a1b4a31df8568974c28c06ab0a574b1126e45083a8", size = 4222133, upload-time = "2025-11-12T03:21:58.212Z" }, + { url = "https://files.pythonhosted.org/packages/36/52/48ad2f7dae6288a2ddf0ac536d46ce4883d2d10ec7e16afbbd48f1ec0ff3/oracledb-3.4.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:251211d64b90cc42d00ec2d2893873bc02ff4bc22125e9fc5a7f148a6208fd88", size = 2230374, upload-time = "2025-11-12T03:21:59.656Z" }, + { url = "https://files.pythonhosted.org/packages/8d/08/60d4301b4f72f099ed2252f8d0eb143e6fe9e5c8f4c2705c3163cea36808/oracledb-3.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ea529a5e6036fae3e2bc195fa76b6f48cd9c431e68c74ef78ee6a5e39c855c39", size = 2421755, upload-time = "2025-11-12T03:22:01.543Z" }, + { url = "https://files.pythonhosted.org/packages/48/35/412a90019a030f5dff0c031319733c6b8dd477832bafa88b733b4b3ec57b/oracledb-3.4.1-cp312-cp312-win32.whl", hash = "sha256:94e8e6d63b45fedd4e243147cb25dea1a0f6599d83852f3979fe725a8533e85a", size = 1449688, upload-time = "2025-11-12T03:22:03.422Z" }, + { url = "https://files.pythonhosted.org/packages/7b/01/ae9eca3055dc625923564ca653ca99ddd8eda95e44953ce55c18aba55066/oracledb-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:84f15c483f9ec80dcded925df6ff473c69a293cd694d09b69abb911500659df4", size = 1794622, upload-time = "2025-11-12T03:22:04.941Z" }, + { url = "https://files.pythonhosted.org/packages/f0/4d/e32db901340dc6fc824d0d3b5e4660fe0199fba8adb0e81ac08b639c8ab9/oracledb-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:ad817807b293e371c951af8ee67a56a5af88a5680a54fe79dfc7b9393ca128aa", size = 4206469, upload-time = "2025-11-12T03:22:06.881Z" }, + { url = "https://files.pythonhosted.org/packages/cf/68/1a038f29523eea19e42f4dd765bf523752408816b5ff21e8b998d8b25457/oracledb-3.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:34b9bc25eae217defa3f4b8289b4915cd1101aaeeec33c7bace74f927996d452", size = 2233055, upload-time = "2025-11-12T03:22:08.259Z" }, + { url = "https://files.pythonhosted.org/packages/b9/66/a51243553ac6b0e1bc2cfd4db8a2f3299b1b60c9231d7c9133ee1442d15b/oracledb-3.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be6575759ba56ab3758f82bfbb74f75288ce69190e19c087793050cb012c0aa1", size = 2443312, upload-time = "2025-11-12T03:22:09.615Z" }, + { url = "https://files.pythonhosted.org/packages/f7/57/a6056d4432c07a959fd1032dd45bfaff69b91ac7e1204dbccf7bf7b4a91d/oracledb-3.4.1-cp313-cp313-win32.whl", hash = "sha256:635587e5f28be83ec0bf72e4bfb2f3a4544c0f8e303f2327f376d57116894541", size = 1453553, upload-time = "2025-11-12T03:22:11.045Z" }, + { url = "https://files.pythonhosted.org/packages/6a/57/dca415d8dd18a2a030a9402d49039493cdce6acfd37c8a038a4ede2328e6/oracledb-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:354177708352e124c0f97ceccbe34be05e7f3ce7040a7dd3c2ebd857145ffe74", size = 1794005, upload-time = "2025-11-12T03:22:12.694Z" }, + { url = "https://files.pythonhosted.org/packages/59/07/dff7b9e6242b627d56f3fa6ad6639802003e1e5fbcc883d0ce27d82455ad/oracledb-3.4.1-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:3ec1f9dd7310da7cbf219c2a05bb52df08da950c95ad2ace8a289854947bdc6b", size = 4247946, upload-time = "2025-11-12T03:22:14.473Z" }, + { url = "https://files.pythonhosted.org/packages/1f/95/739868c6f312683cc3afe9534644b4ce2d054fe137d8f7a1e7786df9f5aa/oracledb-3.4.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:337a67d6c91015dfe7a2a1915f65c74adad26fcd428daaead296d91c92f09ad1", size = 2271628, upload-time = "2025-11-12T03:22:15.956Z" }, + { url = "https://files.pythonhosted.org/packages/fb/7c/307da513f5fb68e6454beb5bc1c715ec09a70d2af70a28b9fa6001c1b09b/oracledb-3.4.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d5ffe4dd26e8012de433ec69f93be5737d81b04324072ec36dad37eb778fd9d", size = 2455603, upload-time = "2025-11-12T03:22:18.112Z" }, + { url = "https://files.pythonhosted.org/packages/c5/1a/af5bd7239cebfc33541432cfcba75893a3f2f44fa66648e6d8ce1fe96b0c/oracledb-3.4.1-cp314-cp314-win32.whl", hash = "sha256:693ef5f8c420545511096b3bc9a3861617222717321bc78c776afbbb6c16c5b9", size = 1474932, upload-time = "2025-11-12T03:22:19.574Z" }, + { url = "https://files.pythonhosted.org/packages/f1/ee/79d2ed18fd234bcbd407c1b36372dc898cf68de825ec650df7b1627acb51/oracledb-3.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:6adb483d7120cdd056173b71c901f71dbe2265c5bd402f768b0b1ab27af519b1", size = 1837566, upload-time = "2025-11-12T03:22:20.959Z" }, ] [[package]] @@ -3768,15 +3774,15 @@ wheels = [ [[package]] name = "polyfactory" -version = "2.22.3" +version = "2.22.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "faker" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/64/5a/c9105c974e03d78dc6d5642bee97f075156a28ad344428e562c6c86526b9/polyfactory-2.22.3.tar.gz", hash = 
"sha256:ae57d07408d1f7609031a83827c7980ce32104535e146cac2253988d0a7665e1", size = 263543, upload-time = "2025-10-18T14:04:54.901Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c4/74/193e3035e33adcb88399bb89fcb57578c15ea3060a085c5fff10e2fcd162/polyfactory-2.22.4.tar.gz", hash = "sha256:e63a5a55e8363830dfd71c0bcfc1651a29d9fc98048b54c8333de1971dc98547", size = 264413, upload-time = "2025-11-10T16:03:37.152Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c4/f7/244a5b1dd298650e4092c501197dad45036b1c31309ad4d01af430071a0f/polyfactory-2.22.3-py3-none-any.whl", hash = "sha256:0bfd5fe2fb2e5db39ded6aee8e923d1961095d4ebb44185cceee4654cb85e0b1", size = 63715, upload-time = "2025-10-18T14:04:52.657Z" }, + { url = "https://files.pythonhosted.org/packages/0b/12/95b5e48b07378df89be9f56e1bdc4fcc98928e2f4e7f5f38b3e8e479deb9/polyfactory-2.22.4-py3-none-any.whl", hash = "sha256:6c4ebe24e16e7e8461bdd56dfd7d4df3172936a5077c5e5d3b101a5517f267dc", size = 63888, upload-time = "2025-11-10T16:03:35.897Z" }, ] [[package]] @@ -4628,7 +4634,7 @@ wheels = [ [[package]] name = "pytest" -version = "8.4.2" +version = "9.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -4639,23 +4645,23 @@ dependencies = [ { name = "pygments" }, { name = "tomli", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +sdist = { url = "https://files.pythonhosted.org/packages/07/56/f013048ac4bc4c1d9be45afd4ab209ea62822fb1598f40687e6bf45dcea4/pytest-9.0.1.tar.gz", hash = "sha256:3e9c069ea73583e255c3b21cf46b8d3c56f6e3a1a8f6da94ccb0fcf57b9d73c8", size = 1564125, upload-time = "2025-11-12T13:05:09.333Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, + { url = "https://files.pythonhosted.org/packages/0b/8b/6300fb80f858cda1c51ffa17075df5d846757081d11ab4aa35cef9e6258b/pytest-9.0.1-py3-none-any.whl", hash = "sha256:67be0030d194df2dfa7b556f2e56fb3c3315bd5c8822c6951162b92b32ce7dad", size = 373668, upload-time = "2025-11-12T13:05:07.379Z" }, ] [[package]] name = "pytest-asyncio" -version = "1.2.0" +version = "1.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "backports-asyncio-runner", marker = "python_full_version < '3.11'" }, { name = "pytest" }, { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/42/86/9e3c5f48f7b7b638b216e4b9e645f54d199d7abbbab7a64a13b4e12ba10f/pytest_asyncio-1.2.0.tar.gz", hash = "sha256:c609a64a2a8768462d0c99811ddb8bd2583c33fd33cf7f21af1c142e824ffb57", size = 50119, upload-time = "2025-09-12T07:33:53.816Z" } +sdist = { url = "https://files.pythonhosted.org/packages/90/2c/8af215c0f776415f3590cac4f9086ccefd6fd463befeae41cd4d3f193e5a/pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5", size = 50087, upload-time = "2025-11-10T16:07:47.256Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/04/93/2fa34714b7a4ae72f2f8dad66ba17dd9a2c793220719e736dda28b7aec27/pytest_asyncio-1.2.0-py3-none-any.whl", hash = "sha256:8e17ae5e46d8e7efe51ab6494dd2010f4ca8dae51652aa3c8d55acf50bfb2e99", size = 15095, upload-time = "2025-09-12T07:33:52.639Z" }, + { url = "https://files.pythonhosted.org/packages/e5/35/f8b19922b6a25bc0880171a2f1a003eaeb93657475193ab516fd87cac9da/pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5", size = 15075, upload-time = "2025-11-10T16:07:45.537Z" }, ] [[package]] @@ -6503,11 +6509,11 @@ wheels = [ [[package]] name = "types-psutil" -version = "7.0.0.20251001" +version = "7.0.0.20251111" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/9e/91/b020f9100b196a1f247cd12575f68dcdad94f032c1e0c42987d7632142ce/types_psutil-7.0.0.20251001.tar.gz", hash = "sha256:60d696200ddae28677e7d88cdebd6e960294e85adefbaafe0f6e5d0e7b4c1963", size = 20469, upload-time = "2025-10-01T03:04:21.292Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1a/ba/4f48c927f38c7a4d6f7ff65cde91c49d28a95a56e00ec19b2813e1e0b1c1/types_psutil-7.0.0.20251111.tar.gz", hash = "sha256:d109ee2da4c0a9b69b8cefc46e195db8cf0fc0200b6641480df71e7f3f51a239", size = 20287, upload-time = "2025-11-11T03:06:37.482Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/99/50f30e0b648e6f583165cb2e535b0256a02a03efa4868cb2f017ad25b3d8/types_psutil-7.0.0.20251001-py3-none-any.whl", hash = "sha256:adc31de8386d31c61bd4123112fd51e2c700c7502a001cad72a3d56ba6b463d1", size = 23164, upload-time = "2025-10-01T03:04:20.089Z" }, + { url = "https://files.pythonhosted.org/packages/fb/bc/b081d10fbd933cdf839109707a693c668a174e2276d64159a582a9cebd3f/types_psutil-7.0.0.20251111-py3-none-any.whl", hash = "sha256:85ba00205dcfa3c73685122e5a360205d2fbc9b56f942b591027bf401ce0cc47", size = 23052, upload-time = "2025-11-11T03:06:36.011Z" }, ] [[package]] From 19318fea76ebf47f157bf3686dd19bb7b2d5bf61 Mon Sep 17 00:00:00 2001 From: Cody Fincher Date: Thu, 13 Nov 2025 02:25:21 +0000 Subject: [PATCH 05/10] feat(portal): enhance PortalProvider and PortalManager with PID tracking and restart logic --- sqlspec/utils/portal.py | 28 ++++++++++++++++++------ tests/unit/test_utils/test_portal.py | 32 +++++++++++++++++++++++++++- 2 files changed, 53 insertions(+), 7 deletions(-) diff --git a/sqlspec/utils/portal.py b/sqlspec/utils/portal.py index f70dcbbdc..4afc2e691 100644 --- a/sqlspec/utils/portal.py +++ b/sqlspec/utils/portal.py @@ -6,9 +6,10 @@ import asyncio import functools +import os import queue import threading -from typing import TYPE_CHECKING, Any, TypeVar +from typing import TYPE_CHECKING, Any, TypeVar, cast from sqlspec.exceptions import ImproperConfigurationError from sqlspec.utils.logging import get_logger @@ -44,6 +45,7 @@ def __init__(self) -> None: self._loop: asyncio.AbstractEventLoop | None = None self._thread: threading.Thread | None = None self._ready_event: threading.Event = threading.Event() + self._pid: int | None = None @property def portal(self) -> "Portal": @@ -99,6 +101,7 @@ def start(self) -> None: self._thread = threading.Thread(target=self._run_event_loop, daemon=True) self._thread.start() self._ready_event.wait() + self._pid = os.getpid() logger.debug("Portal provider started") def stop(self) -> None: @@ -120,6 +123,7 @@ def stop(self) -> None: self._loop = None self._thread = None self._ready_event.clear() + self._pid = None 
logger.debug("Portal provider stopped") def _run_event_loop(self) -> None: @@ -168,8 +172,8 @@ def call(self, func: "Callable[..., Coroutine[Any, Any, _R]]", *args: Any, **kwa Raises: ImproperConfigurationError: If portal provider not started. """ - if self._loop is None: - msg = "Portal provider not started. Call start() first." + if self._loop is None or not self.is_running: + msg = "Portal provider not running. Call start() first." raise ImproperConfigurationError(msg) local_result_queue: queue.Queue[tuple[_R | None, Exception | None]] = queue.Queue() @@ -257,6 +261,7 @@ def __init__(self) -> None: self._provider: PortalProvider | None = None self._portal: Portal | None = None self._lock = threading.Lock() + self._pid: int | None = None def get_or_create_portal(self) -> Portal: """Get or create the global portal instance. @@ -267,15 +272,19 @@ def get_or_create_portal(self) -> Portal: Returns: Global portal instance. """ - if self._portal is None: + current_pid = os.getpid() + if self._needs_restart(current_pid): with self._lock: - if self._portal is None: + if self._needs_restart(current_pid): + if self._provider is not None: + self._provider.stop() self._provider = PortalProvider() self._provider.start() self._portal = Portal(self._provider) + self._pid = current_pid logger.debug("Global portal provider created and started") - return self._portal + return cast("Portal", self._portal) @property def is_running(self) -> bool: @@ -295,8 +304,15 @@ def stop(self) -> None: self._provider.stop() self._provider = None self._portal = None + self._pid = None logger.debug("Global portal provider stopped") + def _needs_restart(self, current_pid: int) -> bool: + provider_missing = self._provider is None or not self._provider.is_running + portal_missing = self._portal is None + pid_changed = self._pid is not None and self._pid != current_pid + return portal_missing or provider_missing or pid_changed + def get_global_portal() -> Portal: """Get the global portal instance for async-to-sync bridging. 
diff --git a/tests/unit/test_utils/test_portal.py b/tests/unit/test_utils/test_portal.py
index 5a2274a50..bf25f5789 100644
--- a/tests/unit/test_utils/test_portal.py
+++ b/tests/unit/test_utils/test_portal.py
@@ -95,6 +95,16 @@ def test_portal_provider_call(async_multiply: Callable[[int], Coroutine[Any, Any
     provider.stop()
 
 
+def test_portal_provider_call_after_stop(async_add: Callable[[int, int], Coroutine[Any, Any, int]]) -> None:
+    """PortalProvider.call raises once the provider has been stopped."""
+    provider = PortalProvider()
+    provider.start()
+    provider.stop()
+
+    with pytest.raises(ImproperConfigurationError, match="Portal provider not running"):
+        provider.call(async_add, 1, 2)
+
+
 def test_portal_provider_call_with_kwargs(async_add: Callable[[int, int], Coroutine[Any, Any, int]]) -> None:
     """PortalProvider.call supports keyword arguments."""
     provider = PortalProvider()
@@ -116,7 +126,7 @@ def test_portal_provider_call_not_started() -> None:
     async def dummy() -> int:
         return 42
 
-    with pytest.raises(ImproperConfigurationError, match="Portal provider not started"):
+    with pytest.raises(ImproperConfigurationError, match="Portal provider not running"):
         provider.call(dummy)
 
 
@@ -205,6 +215,26 @@ def test_portal_manager_lazy_initialization() -> None:
     manager.stop()
 
 
+def test_portal_manager_restarts_after_pid_change(monkeypatch: Any) -> None:
+    """PortalManager rebuilds the portal when it detects a PID change."""
+    manager = PortalManager()
+    portal1 = manager.get_or_create_portal()
+    provider1 = manager._provider  # type: ignore[attr-defined]
+
+    assert manager.is_running
+    assert provider1 is not None
+
+    monkeypatch.setattr(manager, "_pid", -1)
+
+    portal2 = manager.get_or_create_portal()
+    provider2 = manager._provider  # type: ignore[attr-defined]
+
+    assert portal2 is not portal1
+    assert provider2 is not provider1
+
+    manager.stop()
+
+
 def test_portal_manager_stop() -> None:
     """PortalManager.stop cleans up portal provider."""
     manager = PortalManager()

From 37f6869646ce8248596b9503d9384b662fd7cab5 Mon Sep 17 00:00:00 2001
From: Cody Fincher
Date: Fri, 14 Nov 2025 18:49:43 +0000
Subject: [PATCH 06/10] fix: BigQuery requires a WHERE clause on DELETE
 statements

---
 .pre-commit-config.yaml          |   2 +-
 pyproject.toml                   |  12 +--
 .../test_bigquery/test_driver.py |   4 +-
 uv.lock                          | 100 +++++++++---------
 4 files changed, 60 insertions(+), 58 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bc91f6d61..98393a8cf 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,7 +17,7 @@ repos:
       - id: mixed-line-ending
       - id: trailing-whitespace
   - repo: https://github.com/charliermarsh/ruff-pre-commit
-    rev: "v0.14.4"
+    rev: "v0.14.5"
     hooks:
       - id: ruff
         args: ["--fix"]
diff --git a/pyproject.toml b/pyproject.toml
index a568b67cf..629845b96 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -188,12 +188,12 @@ include = [
     "sqlspec/observability/_spans.py",
 
     # === STORAGE LAYER ===
-    "sqlspec/storage/_utils.py",
-    "sqlspec/storage/registry.py",
-    "sqlspec/storage/backends/base.py",
-    "sqlspec/storage/backends/obstore.py",
-    "sqlspec/storage/backends/fsspec.py",
-    "sqlspec/storage/backends/local.py",
+    # "sqlspec/storage/_utils.py",
+    # "sqlspec/storage/registry.py",
+    # "sqlspec/storage/backends/base.py",
+    # "sqlspec/storage/backends/obstore.py",
+    # "sqlspec/storage/backends/fsspec.py",
+    # "sqlspec/storage/backends/local.py",
 ]
 mypy-args = [
     "--ignore-missing-imports",
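Context for the test hunks below: unlike most SQL dialects, BigQuery's DML grammar makes the WHERE clause on DELETE mandatory, so a bare DELETE FROM t is a syntax error rather than a full-table delete. The conventional idiom for deleting every row is WHERE true; the tests use WHERE id IS NOT NULL instead, which clears the table provided every seeded row has a non-null id. A minimal illustration, with the session fixture from the tests and a hypothetical table name:

    # Rejected by BigQuery: DELETE must carry a WHERE clause.
    # bigquery_session.execute("DELETE FROM demo.items")

    # Accepted: conventional full-table delete.
    bigquery_session.execute("DELETE FROM demo.items WHERE true")
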
diff --git a/tests/integration/test_adapters/test_bigquery/test_driver.py b/tests/integration/test_adapters/test_bigquery/test_driver.py
index 8530ec8f1..802e08906 100644
--- a/tests/integration/test_adapters/test_bigquery/test_driver.py
+++ b/tests/integration/test_adapters/test_bigquery/test_driver.py
@@ -221,7 +221,7 @@ def test_bigquery_complex_queries(bigquery_session: BigQueryDriver, driver_test_
 
 def test_bigquery_statement_stack_sequential(bigquery_session: BigQueryDriver, driver_test_table: str) -> None:
     """StatementStack executions should remain sequential on BigQuery."""
-    bigquery_session.execute(f"DELETE FROM {driver_test_table}")
+    bigquery_session.execute(f"DELETE FROM {driver_test_table} WHERE id IS NOT NULL")
 
     stack = (
         StatementStack()
@@ -241,7 +241,7 @@ def test_bigquery_statement_stack_sequential(bigquery_session: BigQueryDriver, d
 
 def test_bigquery_statement_stack_continue_on_error(bigquery_session: BigQueryDriver, driver_test_table: str) -> None:
     """Continue-on-error should surface BigQuery failures but keep executing."""
-    bigquery_session.execute(f"DELETE FROM {driver_test_table}")
+    bigquery_session.execute(f"DELETE FROM {driver_test_table} WHERE id IS NOT NULL")
 
     stack = (
         StatementStack()
diff --git a/uv.lock b/uv.lock
index 4913d4a1e..af3809b40 100644
--- a/uv.lock
+++ b/uv.lock
@@ -734,15 +734,15 @@ wheels = [
 
 [[package]]
 name = "cachecontrol"
-version = "0.14.3"
+version = "0.14.4"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "msgpack" },
     { name = "requests" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/58/3a/0cbeb04ea57d2493f3ec5a069a117ab467f85e4a10017c6d854ddcbff104/cachecontrol-0.14.3.tar.gz", hash = "sha256:73e7efec4b06b20d9267b441c1f733664f989fb8688391b670ca812d70795d11", size = 28985, upload-time = "2025-04-30T16:45:06.135Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/2d/f6/c972b32d80760fb79d6b9eeb0b3010a46b89c0b23cf6329417ff7886cd22/cachecontrol-0.14.4.tar.gz", hash = "sha256:e6220afafa4c22a47dd0badb319f84475d79108100d04e26e8542ef7d3ab05a1", size = 16150, upload-time = "2025-11-14T04:32:13.138Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/81/4c/800b0607b00b3fd20f1087f80ab53d6b4d005515b0f773e4831e37cfa83f/cachecontrol-0.14.3-py3-none-any.whl", hash = "sha256:b35e44a3113f17d2a31c1e6b27b9de6d4405f84ae51baa8c1d3cc5b633010cae", size = 21802, upload-time = "2025-04-30T16:45:03.863Z" },
+    { url = "https://files.pythonhosted.org/packages/ef/79/c45f2d53efe6ada1110cf6f9fca095e4ff47a0454444aefdde6ac4789179/cachecontrol-0.14.4-py3-none-any.whl", hash = "sha256:b7ac014ff72ee199b5f8af1de29d60239954f223e948196fa3d84adaffc71d2b", size = 22247, upload-time = "2025-11-14T04:32:11.733Z" },
 ]
 
 [package.optional-dependencies]
@@ -752,11 +752,11 @@ filecache = [
 
 [[package]]
 name = "cachetools"
-version = "6.2.1"
+version = "6.2.2"
 source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/cc/7e/b975b5814bd36faf009faebe22c1072a1fa1168db34d285ef0ba071ad78c/cachetools-6.2.1.tar.gz", hash = "sha256:3f391e4bd8f8bf0931169baf7456cc822705f4e2a31f840d218f445b9a854201", size = 31325, upload-time = "2025-10-12T14:55:30.139Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/fb/44/ca1675be2a83aeee1886ab745b28cda92093066590233cc501890eb8417a/cachetools-6.2.2.tar.gz", hash = "sha256:8e6d266b25e539df852251cfd6f990b4bc3a141db73b939058d809ebd2590fc6", size = 31571, upload-time = "2025-11-13T17:42:51.465Z" }
 wheels = [
-    { url =
"https://files.pythonhosted.org/packages/96/c5/1e741d26306c42e2bf6ab740b2202872727e0f606033c9dd713f8b93f5a8/cachetools-6.2.1-py3-none-any.whl", hash = "sha256:09868944b6dde876dfd44e1d47e18484541eaf12f26f29b7af91b26cc892d701", size = 11280, upload-time = "2025-10-12T14:55:28.382Z" }, + { url = "https://files.pythonhosted.org/packages/e6/46/eb6eca305c77a4489affe1c5d8f4cae82f285d9addd8de4ec084a7184221/cachetools-6.2.2-py3-none-any.whl", hash = "sha256:6c09c98183bf58560c97b2abfcedcbaf6a896a490f534b031b661d3723b45ace", size = 11503, upload-time = "2025-11-13T17:42:50.232Z" }, ] [[package]] @@ -1363,7 +1363,7 @@ wheels = [ [[package]] name = "fastapi" -version = "0.121.1" +version = "0.121.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-doc" }, @@ -1371,9 +1371,9 @@ dependencies = [ { name = "starlette" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6b/a4/29e1b861fc9017488ed02ff1052feffa40940cb355ed632a8845df84ce84/fastapi-0.121.1.tar.gz", hash = "sha256:b6dba0538fd15dab6fe4d3e5493c3957d8a9e1e9257f56446b5859af66f32441", size = 342523, upload-time = "2025-11-08T21:48:14.068Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/48/f08f264da34cf160db82c62ffb335e838b1fc16cbcc905f474c7d4c815db/fastapi-0.121.2.tar.gz", hash = "sha256:ca8e932b2b823ec1721c641e3669472c855ad9564a2854c9899d904c2848b8b9", size = 342944, upload-time = "2025-11-13T17:05:54.692Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/94/fd/2e6f7d706899cc08690c5f6641e2ffbfffe019e8f16ce77104caa5730910/fastapi-0.121.1-py3-none-any.whl", hash = "sha256:2c5c7028bc3a58d8f5f09aecd3fd88a000ccc0c5ad627693264181a3c33aa1fc", size = 109192, upload-time = "2025-11-08T21:48:12.458Z" }, + { url = "https://files.pythonhosted.org/packages/eb/23/dfb161e91db7c92727db505dc72a384ee79681fe0603f706f9f9f52c2901/fastapi-0.121.2-py3-none-any.whl", hash = "sha256:f2d80b49a86a846b70cc3a03eb5ea6ad2939298bf6a7fe377aa9cd3dd079d358", size = 109201, upload-time = "2025-11-13T17:05:52.718Z" }, ] [[package]] @@ -2081,7 +2081,7 @@ wheels = [ [[package]] name = "google-genai" -version = "1.50.0" +version = "1.50.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -2093,9 +2093,9 @@ dependencies = [ { name = "typing-extensions" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5f/7b/0d0c8f3a52cfda38064e650f7d2c02a7108d3a34d161bd5191069f909cf1/google_genai-1.50.0.tar.gz", hash = "sha256:b1ee723b3491977166cf268e6fb44e5dc430fbbd3c45011e752826a4ffdf2066", size = 254654, upload-time = "2025-11-12T22:45:21.964Z" } +sdist = { url = "https://files.pythonhosted.org/packages/09/74/1382f655a8c24adc2811f113018ff2b3884f333284ba9bff5c57f8dbcbba/google_genai-1.50.1.tar.gz", hash = "sha256:8f0d95b1b165df71e6a7e1c0d0cadb5fad30f913f42c6b131b9ebb504eec0e5f", size = 254693, upload-time = "2025-11-13T23:17:22.526Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/0c/959a1343003bbbb50b20541304c5eee5564225182c285aab3e0d09f24db0/google_genai-1.50.0-py3-none-any.whl", hash = "sha256:adfb8ab3fca612693c1778267649d955757f95a7a1bf97e781802ab3b5b993a0", size = 257311, upload-time = "2025-11-12T22:45:20.731Z" }, + { url = "https://files.pythonhosted.org/packages/30/6b/78a7588d9a4f6c8c8ed326a32385d0566a3262c91c3f7a005e4231207894/google_genai-1.50.1-py3-none-any.whl", hash = "sha256:15ae694b080269c53d325dcce94622f33e94cf81bd2123f029ab77e6b8f09eab", size = 257324, upload-time = 
"2025-11-13T23:17:21.259Z" }, ] [[package]] @@ -2658,7 +2658,7 @@ wheels = [ [[package]] name = "mcp" -version = "1.21.0" +version = "1.21.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -2672,11 +2672,13 @@ dependencies = [ { name = "pywin32", marker = "sys_platform == 'win32'" }, { name = "sse-starlette" }, { name = "starlette" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/33/54/dd2330ef4611c27ae59124820863c34e1d3edb1133c58e6375e2d938c9c5/mcp-1.21.0.tar.gz", hash = "sha256:bab0a38e8f8c48080d787233343f8d301b0e1e95846ae7dead251b2421d99855", size = 452697, upload-time = "2025-11-06T23:19:58.432Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f7/25/4df633e7574254ada574822db2245bbee424725d1b01bccae10bf128794e/mcp-1.21.1.tar.gz", hash = "sha256:540e6ac4b12b085c43f14879fde04cbdb10148a09ea9492ff82d8c7ba651a302", size = 469071, upload-time = "2025-11-13T20:33:46.139Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/39/47/850b6edc96c03bd44b00de9a0ca3c1cc71e0ba1cd5822955bc9e4eb3fad3/mcp-1.21.0-py3-none-any.whl", hash = "sha256:598619e53eb0b7a6513db38c426b28a4bdf57496fed04332100d2c56acade98b", size = 173672, upload-time = "2025-11-06T23:19:56.508Z" }, + { url = "https://files.pythonhosted.org/packages/49/af/01fb42df59ad15925ffc1e2e609adafddd3ac4572f606faae0dc8b55ba0c/mcp-1.21.1-py3-none-any.whl", hash = "sha256:dd35abe36d68530a8a1291daa25d50276d8731e545c0434d6e250a3700dd2a6d", size = 174852, upload-time = "2025-11-13T20:33:44.502Z" }, ] [[package]] @@ -3950,17 +3952,17 @@ wheels = [ [[package]] name = "protobuf" -version = "6.33.0" +version = "6.33.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/19/ff/64a6c8f420818bb873713988ca5492cba3a7946be57e027ac63495157d97/protobuf-6.33.0.tar.gz", hash = "sha256:140303d5c8d2037730c548f8c7b93b20bb1dc301be280c378b82b8894589c954", size = 443463, upload-time = "2025-10-15T20:39:52.159Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/03/a1440979a3f74f16cab3b75b0da1a1a7f922d56a8ddea96092391998edc0/protobuf-6.33.1.tar.gz", hash = "sha256:97f65757e8d09870de6fd973aeddb92f85435607235d20b2dfed93405d00c85b", size = 443432, upload-time = "2025-11-13T16:44:18.895Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7e/ee/52b3fa8feb6db4a833dfea4943e175ce645144532e8a90f72571ad85df4e/protobuf-6.33.0-cp310-abi3-win32.whl", hash = "sha256:d6101ded078042a8f17959eccd9236fb7a9ca20d3b0098bbcb91533a5680d035", size = 425593, upload-time = "2025-10-15T20:39:40.29Z" }, - { url = "https://files.pythonhosted.org/packages/7b/c6/7a465f1825872c55e0341ff4a80198743f73b69ce5d43ab18043699d1d81/protobuf-6.33.0-cp310-abi3-win_amd64.whl", hash = "sha256:9a031d10f703f03768f2743a1c403af050b6ae1f3480e9c140f39c45f81b13ee", size = 436882, upload-time = "2025-10-15T20:39:42.841Z" }, - { url = "https://files.pythonhosted.org/packages/e1/a9/b6eee662a6951b9c3640e8e452ab3e09f117d99fc10baa32d1581a0d4099/protobuf-6.33.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:905b07a65f1a4b72412314082c7dbfae91a9e8b68a0cc1577515f8df58ecf455", size = 427521, upload-time = "2025-10-15T20:39:43.803Z" }, - { url = "https://files.pythonhosted.org/packages/10/35/16d31e0f92c6d2f0e77c2a3ba93185130ea13053dd16200a57434c882f2b/protobuf-6.33.0-cp39-abi3-manylinux2014_aarch64.whl", hash = 
"sha256:e0697ece353e6239b90ee43a9231318302ad8353c70e6e45499fa52396debf90", size = 324445, upload-time = "2025-10-15T20:39:44.932Z" }, - { url = "https://files.pythonhosted.org/packages/e6/eb/2a981a13e35cda8b75b5585aaffae2eb904f8f351bdd3870769692acbd8a/protobuf-6.33.0-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:e0a1715e4f27355afd9570f3ea369735afc853a6c3951a6afe1f80d8569ad298", size = 339159, upload-time = "2025-10-15T20:39:46.186Z" }, - { url = "https://files.pythonhosted.org/packages/21/51/0b1cbad62074439b867b4e04cc09b93f6699d78fd191bed2bbb44562e077/protobuf-6.33.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:35be49fd3f4fefa4e6e2aacc35e8b837d6703c37a2168a55ac21e9b1bc7559ef", size = 323172, upload-time = "2025-10-15T20:39:47.465Z" }, - { url = "https://files.pythonhosted.org/packages/07/d1/0a28c21707807c6aacd5dc9c3704b2aa1effbf37adebd8caeaf68b17a636/protobuf-6.33.0-py3-none-any.whl", hash = "sha256:25c9e1963c6734448ea2d308cfa610e692b801304ba0908d7bfa564ac5132995", size = 170477, upload-time = "2025-10-15T20:39:51.311Z" }, + { url = "https://files.pythonhosted.org/packages/06/f1/446a9bbd2c60772ca36556bac8bfde40eceb28d9cc7838755bc41e001d8f/protobuf-6.33.1-cp310-abi3-win32.whl", hash = "sha256:f8d3fdbc966aaab1d05046d0240dd94d40f2a8c62856d41eaa141ff64a79de6b", size = 425593, upload-time = "2025-11-13T16:44:06.275Z" }, + { url = "https://files.pythonhosted.org/packages/a6/79/8780a378c650e3df849b73de8b13cf5412f521ca2ff9b78a45c247029440/protobuf-6.33.1-cp310-abi3-win_amd64.whl", hash = "sha256:923aa6d27a92bf44394f6abf7ea0500f38769d4b07f4be41cb52bd8b1123b9ed", size = 436883, upload-time = "2025-11-13T16:44:09.222Z" }, + { url = "https://files.pythonhosted.org/packages/cd/93/26213ff72b103ae55bb0d73e7fb91ea570ef407c3ab4fd2f1f27cac16044/protobuf-6.33.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:fe34575f2bdde76ac429ec7b570235bf0c788883e70aee90068e9981806f2490", size = 427522, upload-time = "2025-11-13T16:44:10.475Z" }, + { url = "https://files.pythonhosted.org/packages/c2/32/df4a35247923393aa6b887c3b3244a8c941c32a25681775f96e2b418f90e/protobuf-6.33.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:f8adba2e44cde2d7618996b3fc02341f03f5bc3f2748be72dc7b063319276178", size = 324445, upload-time = "2025-11-13T16:44:11.869Z" }, + { url = "https://files.pythonhosted.org/packages/8e/d0/d796e419e2ec93d2f3fa44888861c3f88f722cde02b7c3488fcc6a166820/protobuf-6.33.1-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:0f4cf01222c0d959c2b399142deb526de420be8236f22c71356e2a544e153c53", size = 339161, upload-time = "2025-11-13T16:44:12.778Z" }, + { url = "https://files.pythonhosted.org/packages/1d/2a/3c5f05a4af06649547027d288747f68525755de692a26a7720dced3652c0/protobuf-6.33.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:8fd7d5e0eb08cd5b87fd3df49bc193f5cfd778701f47e11d127d0afc6c39f1d1", size = 323171, upload-time = "2025-11-13T16:44:14.035Z" }, + { url = "https://files.pythonhosted.org/packages/08/b4/46310463b4f6ceef310f8348786f3cff181cea671578e3d9743ba61a459e/protobuf-6.33.1-py3-none-any.whl", hash = "sha256:d595a9fd694fdeb061a62fbe10eb039cc1e444df81ec9bb70c7fc59ebcb1eafa", size = 170477, upload-time = "2025-11-13T16:44:17.633Z" }, ] [[package]] @@ -5153,28 +5155,28 @@ wheels = [ [[package]] name = "ruff" -version = "0.14.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/df/55/cccfca45157a2031dcbb5a462a67f7cf27f8b37d4b3b1cd7438f0f5c1df6/ruff-0.14.4.tar.gz", hash = 
"sha256:f459a49fe1085a749f15414ca76f61595f1a2cc8778ed7c279b6ca2e1fd19df3", size = 5587844, upload-time = "2025-11-06T22:07:45.033Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/17/b9/67240254166ae1eaa38dec32265e9153ac53645a6c6670ed36ad00722af8/ruff-0.14.4-py3-none-linux_armv6l.whl", hash = "sha256:e6604613ffbcf2297cd5dcba0e0ac9bd0c11dc026442dfbb614504e87c349518", size = 12606781, upload-time = "2025-11-06T22:07:01.841Z" }, - { url = "https://files.pythonhosted.org/packages/46/c8/09b3ab245d8652eafe5256ab59718641429f68681ee713ff06c5c549f156/ruff-0.14.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d99c0b52b6f0598acede45ee78288e5e9b4409d1ce7f661f0fa36d4cbeadf9a4", size = 12946765, upload-time = "2025-11-06T22:07:05.858Z" }, - { url = "https://files.pythonhosted.org/packages/14/bb/1564b000219144bf5eed2359edc94c3590dd49d510751dad26202c18a17d/ruff-0.14.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:9358d490ec030f1b51d048a7fd6ead418ed0826daf6149e95e30aa67c168af33", size = 11928120, upload-time = "2025-11-06T22:07:08.023Z" }, - { url = "https://files.pythonhosted.org/packages/a3/92/d5f1770e9988cc0742fefaa351e840d9aef04ec24ae1be36f333f96d5704/ruff-0.14.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81b40d27924f1f02dfa827b9c0712a13c0e4b108421665322218fc38caf615c2", size = 12370877, upload-time = "2025-11-06T22:07:10.015Z" }, - { url = "https://files.pythonhosted.org/packages/e2/29/e9282efa55f1973d109faf839a63235575519c8ad278cc87a182a366810e/ruff-0.14.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f5e649052a294fe00818650712083cddc6cc02744afaf37202c65df9ea52efa5", size = 12408538, upload-time = "2025-11-06T22:07:13.085Z" }, - { url = "https://files.pythonhosted.org/packages/8e/01/930ed6ecfce130144b32d77d8d69f5c610e6d23e6857927150adf5d7379a/ruff-0.14.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa082a8f878deeba955531f975881828fd6afd90dfa757c2b0808aadb437136e", size = 13141942, upload-time = "2025-11-06T22:07:15.386Z" }, - { url = "https://files.pythonhosted.org/packages/6a/46/a9c89b42b231a9f487233f17a89cbef9d5acd538d9488687a02ad288fa6b/ruff-0.14.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:1043c6811c2419e39011890f14d0a30470f19d47d197c4858b2787dfa698f6c8", size = 14544306, upload-time = "2025-11-06T22:07:17.631Z" }, - { url = "https://files.pythonhosted.org/packages/78/96/9c6cf86491f2a6d52758b830b89b78c2ae61e8ca66b86bf5a20af73d20e6/ruff-0.14.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a9f3a936ac27fb7c2a93e4f4b943a662775879ac579a433291a6f69428722649", size = 14210427, upload-time = "2025-11-06T22:07:19.832Z" }, - { url = "https://files.pythonhosted.org/packages/71/f4/0666fe7769a54f63e66404e8ff698de1dcde733e12e2fd1c9c6efb689cb5/ruff-0.14.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:95643ffd209ce78bc113266b88fba3d39e0461f0cbc8b55fb92505030fb4a850", size = 13658488, upload-time = "2025-11-06T22:07:22.32Z" }, - { url = "https://files.pythonhosted.org/packages/ee/79/6ad4dda2cfd55e41ac9ed6d73ef9ab9475b1eef69f3a85957210c74ba12c/ruff-0.14.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:456daa2fa1021bc86ca857f43fe29d5d8b3f0e55e9f90c58c317c1dcc2afc7b5", size = 13354908, upload-time = "2025-11-06T22:07:24.347Z" }, - { url = "https://files.pythonhosted.org/packages/b5/60/f0b6990f740bb15c1588601d19d21bcc1bd5de4330a07222041678a8e04f/ruff-0.14.4-py3-none-manylinux_2_31_riscv64.whl", hash = 
"sha256:f911bba769e4a9f51af6e70037bb72b70b45a16db5ce73e1f72aefe6f6d62132", size = 13587803, upload-time = "2025-11-06T22:07:26.327Z" }, - { url = "https://files.pythonhosted.org/packages/c9/da/eaaada586f80068728338e0ef7f29ab3e4a08a692f92eb901a4f06bbff24/ruff-0.14.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:76158a7369b3979fa878612c623a7e5430c18b2fd1c73b214945c2d06337db67", size = 12279654, upload-time = "2025-11-06T22:07:28.46Z" }, - { url = "https://files.pythonhosted.org/packages/66/d4/b1d0e82cf9bf8aed10a6d45be47b3f402730aa2c438164424783ac88c0ed/ruff-0.14.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f3b8f3b442d2b14c246e7aeca2e75915159e06a3540e2f4bed9f50d062d24469", size = 12357520, upload-time = "2025-11-06T22:07:31.468Z" }, - { url = "https://files.pythonhosted.org/packages/04/f4/53e2b42cc82804617e5c7950b7079d79996c27e99c4652131c6a1100657f/ruff-0.14.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c62da9a06779deecf4d17ed04939ae8b31b517643b26370c3be1d26f3ef7dbde", size = 12719431, upload-time = "2025-11-06T22:07:33.831Z" }, - { url = "https://files.pythonhosted.org/packages/a2/94/80e3d74ed9a72d64e94a7b7706b1c1ebaa315ef2076fd33581f6a1cd2f95/ruff-0.14.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5a443a83a1506c684e98acb8cb55abaf3ef725078be40237463dae4463366349", size = 13464394, upload-time = "2025-11-06T22:07:35.905Z" }, - { url = "https://files.pythonhosted.org/packages/54/1a/a49f071f04c42345c793d22f6cf5e0920095e286119ee53a64a3a3004825/ruff-0.14.4-py3-none-win32.whl", hash = "sha256:643b69cb63cd996f1fc7229da726d07ac307eae442dd8974dbc7cf22c1e18fff", size = 12493429, upload-time = "2025-11-06T22:07:38.43Z" }, - { url = "https://files.pythonhosted.org/packages/bc/22/e58c43e641145a2b670328fb98bc384e20679b5774258b1e540207580266/ruff-0.14.4-py3-none-win_amd64.whl", hash = "sha256:26673da283b96fe35fa0c939bf8411abec47111644aa9f7cfbd3c573fb125d2c", size = 13635380, upload-time = "2025-11-06T22:07:40.496Z" }, - { url = "https://files.pythonhosted.org/packages/30/bd/4168a751ddbbf43e86544b4de8b5c3b7be8d7167a2a5cb977d274e04f0a1/ruff-0.14.4-py3-none-win_arm64.whl", hash = "sha256:dd09c292479596b0e6fec8cd95c65c3a6dc68e9ad17b8f2382130f87ff6a75bb", size = 12663065, upload-time = "2025-11-06T22:07:42.603Z" }, +version = "0.14.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/82/fa/fbb67a5780ae0f704876cb8ac92d6d76da41da4dc72b7ed3565ab18f2f52/ruff-0.14.5.tar.gz", hash = "sha256:8d3b48d7d8aad423d3137af7ab6c8b1e38e4de104800f0d596990f6ada1a9fc1", size = 5615944, upload-time = "2025-11-13T19:58:51.155Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/31/c07e9c535248d10836a94e4f4e8c5a31a1beed6f169b31405b227872d4f4/ruff-0.14.5-py3-none-linux_armv6l.whl", hash = "sha256:f3b8248123b586de44a8018bcc9fefe31d23dda57a34e6f0e1e53bd51fd63594", size = 13171630, upload-time = "2025-11-13T19:57:54.894Z" }, + { url = "https://files.pythonhosted.org/packages/8e/5c/283c62516dca697cd604c2796d1487396b7a436b2f0ecc3fd412aca470e0/ruff-0.14.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:f7a75236570318c7a30edd7f5491945f0169de738d945ca8784500b517163a72", size = 13413925, upload-time = "2025-11-13T19:57:59.181Z" }, + { url = "https://files.pythonhosted.org/packages/b6/f3/aa319f4afc22cb6fcba2b9cdfc0f03bbf747e59ab7a8c5e90173857a1361/ruff-0.14.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:6d146132d1ee115f8802356a2dc9a634dbf58184c51bff21f313e8cd1c74899a", size = 12574040, upload-time = "2025-11-13T19:58:02.056Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/7f/cb5845fcc7c7e88ed57f58670189fc2ff517fe2134c3821e77e29fd3b0c8/ruff-0.14.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2380596653dcd20b057794d55681571a257a42327da8894b93bbd6111aa801f", size = 13009755, upload-time = "2025-11-13T19:58:05.172Z" }, + { url = "https://files.pythonhosted.org/packages/21/d2/bcbedbb6bcb9253085981730687ddc0cc7b2e18e8dc13cf4453de905d7a0/ruff-0.14.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2d1fa985a42b1f075a098fa1ab9d472b712bdb17ad87a8ec86e45e7fa6273e68", size = 12937641, upload-time = "2025-11-13T19:58:08.345Z" }, + { url = "https://files.pythonhosted.org/packages/a4/58/e25de28a572bdd60ffc6bb71fc7fd25a94ec6a076942e372437649cbb02a/ruff-0.14.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88f0770d42b7fa02bbefddde15d235ca3aa24e2f0137388cc15b2dcbb1f7c7a7", size = 13610854, upload-time = "2025-11-13T19:58:11.419Z" }, + { url = "https://files.pythonhosted.org/packages/7d/24/43bb3fd23ecee9861970978ea1a7a63e12a204d319248a7e8af539984280/ruff-0.14.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:3676cb02b9061fee7294661071c4709fa21419ea9176087cb77e64410926eb78", size = 15061088, upload-time = "2025-11-13T19:58:14.551Z" }, + { url = "https://files.pythonhosted.org/packages/23/44/a022f288d61c2f8c8645b24c364b719aee293ffc7d633a2ca4d116b9c716/ruff-0.14.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b595bedf6bc9cab647c4a173a61acf4f1ac5f2b545203ba82f30fcb10b0318fb", size = 14734717, upload-time = "2025-11-13T19:58:17.518Z" }, + { url = "https://files.pythonhosted.org/packages/58/81/5c6ba44de7e44c91f68073e0658109d8373b0590940efe5bd7753a2585a3/ruff-0.14.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f55382725ad0bdb2e8ee2babcbbfb16f124f5a59496a2f6a46f1d9d99d93e6e2", size = 14028812, upload-time = "2025-11-13T19:58:20.533Z" }, + { url = "https://files.pythonhosted.org/packages/ad/ef/41a8b60f8462cb320f68615b00299ebb12660097c952c600c762078420f8/ruff-0.14.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7497d19dce23976bdaca24345ae131a1d38dcfe1b0850ad8e9e6e4fa321a6e19", size = 13825656, upload-time = "2025-11-13T19:58:23.345Z" }, + { url = "https://files.pythonhosted.org/packages/7c/00/207e5de737fdb59b39eb1fac806904fe05681981b46d6a6db9468501062e/ruff-0.14.5-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:410e781f1122d6be4f446981dd479470af86537fb0b8857f27a6e872f65a38e4", size = 13959922, upload-time = "2025-11-13T19:58:26.537Z" }, + { url = "https://files.pythonhosted.org/packages/bc/7e/fa1f5c2776db4be405040293618846a2dece5c70b050874c2d1f10f24776/ruff-0.14.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c01be527ef4c91a6d55e53b337bfe2c0f82af024cc1a33c44792d6844e2331e1", size = 12932501, upload-time = "2025-11-13T19:58:29.822Z" }, + { url = "https://files.pythonhosted.org/packages/67/d8/d86bf784d693a764b59479a6bbdc9515ae42c340a5dc5ab1dabef847bfaa/ruff-0.14.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f66e9bb762e68d66e48550b59c74314168ebb46199886c5c5aa0b0fbcc81b151", size = 12927319, upload-time = "2025-11-13T19:58:32.923Z" }, + { url = "https://files.pythonhosted.org/packages/ac/de/ee0b304d450ae007ce0cb3e455fe24fbcaaedae4ebaad6c23831c6663651/ruff-0.14.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d93be8f1fa01022337f1f8f3bcaa7ffee2d0b03f00922c45c2207954f351f465", size = 13206209, upload-time = "2025-11-13T19:58:35.952Z" }, + { url = 
"https://files.pythonhosted.org/packages/33/aa/193ca7e3a92d74f17d9d5771a765965d2cf42c86e6f0fd95b13969115723/ruff-0.14.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:c135d4b681f7401fe0e7312017e41aba9b3160861105726b76cfa14bc25aa367", size = 13953709, upload-time = "2025-11-13T19:58:39.002Z" }, + { url = "https://files.pythonhosted.org/packages/cc/f1/7119e42aa1d3bf036ffc9478885c2e248812b7de9abea4eae89163d2929d/ruff-0.14.5-py3-none-win32.whl", hash = "sha256:c83642e6fccfb6dea8b785eb9f456800dcd6a63f362238af5fc0c83d027dd08b", size = 12925808, upload-time = "2025-11-13T19:58:42.779Z" }, + { url = "https://files.pythonhosted.org/packages/3b/9d/7c0a255d21e0912114784e4a96bf62af0618e2190cae468cd82b13625ad2/ruff-0.14.5-py3-none-win_amd64.whl", hash = "sha256:9d55d7af7166f143c94eae1db3312f9ea8f95a4defef1979ed516dbb38c27621", size = 14331546, upload-time = "2025-11-13T19:58:45.691Z" }, + { url = "https://files.pythonhosted.org/packages/e5/80/69756670caedcf3b9be597a6e12276a6cf6197076eb62aad0c608f8efce0/ruff-0.14.5-py3-none-win_arm64.whl", hash = "sha256:4b700459d4649e2594b31f20a9de33bc7c19976d4746d8d0798ad959621d64a4", size = 13433331, upload-time = "2025-11-13T19:58:48.434Z" }, ] [[package]] @@ -6461,11 +6463,11 @@ wheels = [ [[package]] name = "trove-classifiers" -version = "2025.9.11.17" +version = "2025.11.14.15" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ca/9a/778622bc06632529817c3c524c82749a112603ae2bbcf72ee3eb33a2c4f1/trove_classifiers-2025.9.11.17.tar.gz", hash = "sha256:931ca9841a5e9c9408bc2ae67b50d28acf85bef56219b56860876dd1f2d024dd", size = 16975, upload-time = "2025-09-11T17:07:50.97Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bf/a9/880cccf76af9e7b322112f52e4e2dbb3534cbe671197b8f443a42189dfc7/trove_classifiers-2025.11.14.15.tar.gz", hash = "sha256:6b60f49d40bbd895bc61d8dc414fc2f2286d70eb72ed23548db8cf94f62804ca", size = 16995, upload-time = "2025-11-14T15:23:13.78Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e1/85/a4ff8758c66f1fc32aa5e9a145908394bf9cf1c79ffd1113cfdeb77e74e4/trove_classifiers-2025.9.11.17-py3-none-any.whl", hash = "sha256:5d392f2d244deb1866556457d6f3516792124a23d1c3a463a2e8668a5d1c15dd", size = 14158, upload-time = "2025-09-11T17:07:49.886Z" }, + { url = "https://files.pythonhosted.org/packages/49/f6/73c4aa003d1237ee9bea8a46f49dc38c45dfe95af4f0da7e60678d388011/trove_classifiers-2025.11.14.15-py3-none-any.whl", hash = "sha256:d1dac259c1e908939862e3331177931c6df0a37af2c1a8debcc603d9115fcdd9", size = 14191, upload-time = "2025-11-14T15:23:12.467Z" }, ] [[package]] From 320b43283fa250d1279da166ae8f9aaef3550b1a Mon Sep 17 00:00:00 2001 From: Cody Fincher Date: Fri, 14 Nov 2025 18:59:27 +0000 Subject: [PATCH 07/10] fix: mark the mixin file as private like the others --- sqlspec/driver/mixins/__init__.py | 2 +- sqlspec/driver/mixins/{storage.py => _storage.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename sqlspec/driver/mixins/{storage.py => _storage.py} (100%) diff --git a/sqlspec/driver/mixins/__init__.py b/sqlspec/driver/mixins/__init__.py index f5fdb7563..0d1191b04 100644 --- a/sqlspec/driver/mixins/__init__.py +++ b/sqlspec/driver/mixins/__init__.py @@ -2,6 +2,6 @@ from sqlspec.driver.mixins._result_tools import ToSchemaMixin from sqlspec.driver.mixins._sql_translator import SQLTranslatorMixin -from sqlspec.driver.mixins.storage import StorageDriverMixin +from sqlspec.driver.mixins._storage import StorageDriverMixin __all__ = ("SQLTranslatorMixin", 
"StorageDriverMixin", "ToSchemaMixin") diff --git a/sqlspec/driver/mixins/storage.py b/sqlspec/driver/mixins/_storage.py similarity index 100% rename from sqlspec/driver/mixins/storage.py rename to sqlspec/driver/mixins/_storage.py From aef0439ae0e33f859be343ac4aeb0a6cf1b75b59 Mon Sep 17 00:00:00 2001 From: Cody Fincher Date: Fri, 14 Nov 2025 19:18:00 +0000 Subject: [PATCH 08/10] feat(bigquery): implement job retry logic and emulator detection in BigQueryDriver --- sqlspec/adapters/bigquery/driver.py | 61 +++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 4 deletions(-) diff --git a/sqlspec/adapters/bigquery/driver.py b/sqlspec/adapters/bigquery/driver.py index 465f551ce..fd41f76f0 100644 --- a/sqlspec/adapters/bigquery/driver.py +++ b/sqlspec/adapters/bigquery/driver.py @@ -7,11 +7,13 @@ import datetime import io import logging +import os from collections.abc import Callable from decimal import Decimal from typing import TYPE_CHECKING, Any, cast import sqlglot +from google.api_core.retry import Retry from google.cloud.bigquery import ArrayQueryParameter, LoadJobConfig, QueryJob, QueryJobConfig, ScalarQueryParameter from google.cloud.exceptions import GoogleCloudError @@ -355,9 +357,12 @@ class BigQueryDriver(SyncDriverAdapterBase): __slots__ = ( "_data_dictionary", "_default_query_job_config", + "_job_retry", + "_job_retry_deadline", "_json_serializer", "_literal_inliner", "_type_converter", + "_using_emulator", ) dialect = "bigquery" @@ -386,6 +391,9 @@ def __init__( super().__init__(connection=connection, statement_config=statement_config, driver_features=driver_features) self._default_query_job_config: QueryJobConfig | None = (driver_features or {}).get("default_query_job_config") self._data_dictionary: SyncDataDictionaryBase | None = None + self._using_emulator = self._detect_emulator_endpoint(connection) + self._job_retry_deadline = float(features.get("job_retry_deadline", 60.0)) + self._job_retry = None if self._using_emulator else self._build_job_retry() def with_cursor(self, connection: "BigQueryConnection") -> "BigQueryCursor": """Create context manager for cursor management. 
@@ -408,6 +416,51 @@ def handle_database_exceptions(self) -> "AbstractContextManager[None]": """Handle database-specific exceptions and wrap them appropriately.""" return BigQueryExceptionHandler() + @staticmethod + def _detect_emulator_endpoint(connection: BigQueryConnection) -> bool: + """Detect whether the BigQuery client targets an emulator endpoint.""" + + emulator_host = os.getenv("BIGQUERY_EMULATOR_HOST") or os.getenv("BIGQUERY_EMULATOR_HOST_HTTP") + if emulator_host: + return True + + api_base_url = getattr(getattr(connection, "_connection", None), "API_BASE_URL", "") + if not api_base_url: + return False + return "googleapis.com" not in api_base_url + + def _build_job_retry(self) -> Retry: + """Build retry policy for job restarts based on error reason codes.""" + + return Retry(predicate=self._should_retry_job_exception, deadline=self._job_retry_deadline) + + @staticmethod + def _should_retry_job_exception(exception: Exception) -> bool: + """Return True when a BigQuery job exception is safe to retry.""" + + if not isinstance(exception, GoogleCloudError): + return False + + errors = getattr(exception, "errors", None) or [] + retryable_reasons = { + "backendError", + "internalError", + "jobInternalError", + "rateLimitExceeded", + "jobRateLimitExceeded", + } + + for err in errors: + if not isinstance(err, dict): + continue + reason = err.get("reason") + message = (err.get("message") or "").lower() + if reason in retryable_reasons: + # Emulator sometimes reports invalid DML as jobInternalError; guard with obvious syntax hints + return not ("nonexistent_column" in message or ("column" in message and "not present" in message)) + + return False + def _should_copy_attribute(self, attr: str, source_config: QueryJobConfig) -> bool: """Check if attribute should be copied between job configs. 
@@ -565,7 +618,7 @@ def _execute_script(self, cursor: Any, statement: "SQL") -> ExecutionResult: for stmt in statements: job = self._run_query_job(stmt, prepared_parameters or {}, connection=cursor) - job.result() + job.result(job_retry=self._job_retry) last_job = job successful_count += 1 @@ -614,7 +667,7 @@ def _execute_many(self, cursor: Any, statement: "SQL") -> ExecutionResult: script_sql = ";\n".join(script_statements) cursor.job = self._run_query_job(script_sql, None, connection=cursor) - cursor.job.result() + cursor.job.result(job_retry=self._job_retry) affected_rows = ( cursor.job.num_dml_affected_rows if cursor.job.num_dml_affected_rows is not None else len(parameters_list) @@ -635,7 +688,7 @@ def _execute_statement(self, cursor: Any, statement: "SQL") -> ExecutionResult: cursor.job = self._run_query_job(sql, parameters, connection=cursor) if statement.returns_rows(): - job_result = cursor.job.result() + job_result = cursor.job.result(job_retry=self._job_retry) rows_list = self._rows_to_results(iter(job_result)) column_names = [field.name for field in cursor.job.schema] if cursor.job.schema else [] @@ -647,7 +700,7 @@ def _execute_statement(self, cursor: Any, statement: "SQL") -> ExecutionResult: is_select_result=True, ) - cursor.job.result() + cursor.job.result(job_retry=self._job_retry) affected_rows = cursor.job.num_dml_affected_rows or 0 return self.create_execution_result(cursor, rowcount_override=affected_rows) From 263b6fa75177ba48b81acddc31c569dd91b44096 Mon Sep 17 00:00:00 2001 From: Cody Fincher Date: Fri, 14 Nov 2025 21:24:11 +0000 Subject: [PATCH 09/10] refactor: rename StackResult.raw_result to StackResult.result and update related references - Changed all instances of StackResult.raw_result to StackResult.result across the codebase. - Updated the StackResult class to reflect the new naming convention, including adjustments to its constructor and methods. - Modified adapter implementations and tests to accommodate the new result handling. - Ensured backward compatibility in tests by verifying the correct behavior of result handling in various database adapters. 
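A minimal before/after sketch of the rename at a call site (the session and
stack objects are illustrative; only the attribute and helper names come from
this patch):

```python
results = session.execute_stack(stack)

# Before this patch: results[0].rowcount, results[2].raw_result.data
total_inserted = results[0].rows_affected   # renamed from .rowcount
select_result = results[2].result           # renamed from .raw_result
if results[2].is_sql_result():              # new helper on StackResult
    rows = select_result.get_data()         # SQLResult helpers unchanged
```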
--- AGENTS.md | 2 +- .../patterns/stacks/query_stack_example.py | 19 +++-- docs/guides/adapters/asyncpg.md | 4 +- docs/guides/adapters/oracledb.md | 2 +- docs/guides/adapters/sqlite.md | 2 +- docs/guides/architecture/patterns.md | 2 +- docs/guides/performance/batch-execution.md | 2 +- docs/reference/query-stack.rst | 4 +- specs/guides/query-stack.md | 4 +- sqlspec/adapters/asyncpg/driver.py | 4 +- sqlspec/core/result.py | 80 +++++++++++++------ sqlspec/driver/_async.py | 4 +- sqlspec/driver/_sync.py | 4 +- sqlspec/protocols.py | 11 ++- .../test_adbc/test_adbc_driver.py | 8 +- .../test_aiosqlite/test_driver.py | 16 ++-- .../test_adapters/test_asyncmy/test_driver.py | 12 +-- .../test_adapters/test_asyncpg/test_driver.py | 16 ++-- .../test_bigquery/test_driver.py | 8 +- .../test_adapters/test_duckdb/test_driver.py | 16 ++-- .../test_adapters/test_oracledb/test_stack.py | 24 +++--- .../test_adapters/test_psqlpy/test_driver.py | 2 +- .../test_psycopg/test_async_copy.py | 6 +- .../test_adapters/test_psycopg/test_driver.py | 2 +- .../test_adapters/test_sqlite/test_driver.py | 16 ++-- tests/integration/test_stack_edge_cases.py | 14 ++-- .../test_oracledb/test_pipeline_helpers.py | 14 ++-- tests/unit/test_core/test_result.py | 16 ++-- tests/unit/test_core/test_stack.py | 2 +- tests/unit/test_driver/test_stack_base.py | 4 +- 30 files changed, 178 insertions(+), 142 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index acfd26a06..bcbe90872 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -192,7 +192,7 @@ SQLSpec is a type-safe SQL query mapper designed for minimal abstraction between - Validate inputs at push time (non-empty SQL, execute_many payloads, reject nested stacks) so drivers can assume well-formed operations. - **Adapter Responsibilities** - Add a single capability gate per adapter (e.g., Oracle pipeline version check, `psycopg.capabilities.has_pipeline()`), return `super().execute_stack()` immediately when unsupported. - - Preserve `StackResult.raw_result` by building SQL/Arrow results via `create_sql_result()` / `create_arrow_result()` instead of copying row data. + - Preserve `StackResult.result` by building SQL/Arrow results via `create_sql_result()` / `create_arrow_result()` instead of copying row data. - Honor manual toggles via `driver_features={"stack_native_disabled": True}` and document the behavior in the adapter guide. - **Telemetry + Tracing** - Always wrap adapter overrides with `StackExecutionObserver(self, stack, continue_on_error, native_pipeline=bool)`. 
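A hedged sketch of the result-preservation and telemetry bullets above; the
`_run_pipeline_operation` helper is an assumed name, not adapter API:

```python
def _execute_stack_native(self, stack, *, continue_on_error=False):
    # Every adapter override is wrapped in the observer so spans/metrics fire.
    with StackExecutionObserver(self, stack, continue_on_error, native_pipeline=True):
        results = []
        for operation in stack.operations:
            driver_result = self._run_pipeline_operation(operation)  # assumed helper
            # Preserve the original driver result instead of copying row data.
            if operation.method == "execute_arrow":
                results.append(StackResult.from_arrow_result(driver_result))
            else:
                results.append(StackResult.from_sql_result(driver_result))
        return tuple(results)
```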
diff --git a/docs/examples/patterns/stacks/query_stack_example.py b/docs/examples/patterns/stacks/query_stack_example.py index bd1215640..5aabdc625 100644 --- a/docs/examples/patterns/stacks/query_stack_example.py +++ b/docs/examples/patterns/stacks/query_stack_example.py @@ -3,10 +3,9 @@ import asyncio from typing import Any -from sqlspec import SQLSpec +from sqlspec import SQLSpec, StatementStack from sqlspec.adapters.aiosqlite import AiosqliteConfig from sqlspec.adapters.sqlite import SqliteConfig -from sqlspec.core import StatementStack __all__ = ("build_stack", "main", "run_async_example", "run_sync_example") @@ -70,10 +69,10 @@ def run_sync_example() -> None: _seed_sync_tables(session, 1, ("admin", "editor")) results = session.execute_stack(build_stack(user_id=1, action="sync-login")) audit_insert, user_update, role_select = results - print("[sync] rows inserted:", audit_insert.rowcount) - print("[sync] rows updated:", user_update.rowcount) - if role_select.raw_result is not None: - roles = [row["role"] for row in role_select.raw_result.data] + print("[sync] rows inserted:", audit_insert.rows_affected) + print("[sync] rows updated:", user_update.rows_affected) + if role_select.result is not None: + roles = [row["role"] for row in role_select.result.data] print("[sync] roles:", roles) @@ -87,10 +86,10 @@ async def _inner() -> None: await _seed_async_tables(session, 2, ("viewer",)) results = await session.execute_stack(build_stack(user_id=2, action="async-login")) audit_insert, user_update, role_select = results - print("[async] rows inserted:", audit_insert.rowcount) - print("[async] rows updated:", user_update.rowcount) - if role_select.raw_result is not None: - roles = [row["role"] for row in role_select.raw_result.data] + print("[async] rows inserted:", audit_insert.rows_affected) + print("[async] rows updated:", user_update.rows_affected) + if role_select.result is not None: + roles = [row["role"] for row in role_select.result.data] print("[async] roles:", roles) asyncio.run(_inner()) diff --git a/docs/guides/adapters/asyncpg.md b/docs/guides/adapters/asyncpg.md index 61aa66899..71bf262d3 100644 --- a/docs/guides/adapters/asyncpg.md +++ b/docs/guides/adapters/asyncpg.md @@ -139,13 +139,13 @@ For comprehensive configuration options and troubleshooting, see the [Google Clo `StatementStack` calls execute in a single transaction when `continue_on_error=False`, leveraging asyncpg's fast extended-query protocol to minimize round-trips. When you need partial success handling (`continue_on_error=True`), the adapter automatically disables the shared transaction and reports individual failures via `StackResult.error`. - Telemetry spans (`sqlspec.stack.execute`), metrics (`stack.execute.*`), and hashed operation logging are emitted for every stack, so production monitoring captures adoption automatically. -- The pipeline path preserves `StackResult.raw_result` for SELECT statements, so downstream helpers continue to operate on the original `SQLResult` objects. +- The pipeline path preserves `StackResult.result` for SELECT statements, so downstream helpers continue to operate on the original `SQLResult` objects. - To force the sequential fallback (for incident response or regression tests), pass `driver_features={"stack_native_disabled": True}` to the config. 
Example usage: ```python -from sqlspec.core import StatementStack +from sqlspec import StatementStack stack = ( StatementStack() diff --git a/docs/guides/adapters/oracledb.md b/docs/guides/adapters/oracledb.md index a8ab077d4..70869cfbf 100644 --- a/docs/guides/adapters/oracledb.md +++ b/docs/guides/adapters/oracledb.md @@ -19,7 +19,7 @@ This guide provides specific instructions and best practices for working with th ## Query Stack Support -`StatementStack` executions automatically use python-oracledb's native pipeline APIs when the adapter detects a compatible runtime (Oracle Database 23ai+ and python-oracledb ≥ 2.4.0). The pipeline path batches every operation in a stack into a single round-trip while preserving the regular `StackResult.raw_result` semantics, so downstream helpers like `get_data()` or `rowcount` continue to work without code changes. +`StatementStack` executions automatically use python-oracledb's native pipeline APIs when the adapter detects a compatible runtime (Oracle Database 23ai+ and python-oracledb ≥ 2.4.0). The pipeline path batches every operation in a stack into a single round-trip while preserving the regular `StackResult.result` semantics, so downstream helpers like `get_data()` or `rows_affected` continue to work without code changes. ### Requirements diff --git a/docs/guides/adapters/sqlite.md b/docs/guides/adapters/sqlite.md index a20d94f02..2e4244191 100644 --- a/docs/guides/adapters/sqlite.md +++ b/docs/guides/adapters/sqlite.md @@ -20,7 +20,7 @@ This guide covers `sqlite3` (sync) and `aiosqlite` (async). ## Query Stack Support - Neither `sqlite3` nor `aiosqlite` exposes a native batching primitive, so `StatementStack` reuses the base sequential executor. When `continue_on_error=False`, SQLSpec opens a transaction (if one is not already active) so the full stack succeeds or fails atomically; when `continue_on_error=True`, each statement commits immediately to match SQLite’s autocommit semantics. -- Integration coverage lives in `tests/integration/test_adapters/test_sqlite/test_driver.py::test_sqlite_statement_stack_*` and `tests/integration/test_adapters/test_aiosqlite/test_driver.py::test_aiosqlite_statement_stack_*`, ensuring both sync and async flows preserve `StackResult.raw_result` and surface per-statement errors. +- Integration coverage lives in `tests/integration/test_adapters/test_sqlite/test_driver.py::test_sqlite_statement_stack_*` and `tests/integration/test_adapters/test_aiosqlite/test_driver.py::test_aiosqlite_statement_stack_*`, ensuring both sync and async flows preserve `StackResult.result` and surface per-statement errors. ## Best Practices diff --git a/docs/guides/architecture/patterns.md b/docs/guides/architecture/patterns.md index b4d022edf..34296d874 100644 --- a/docs/guides/architecture/patterns.md +++ b/docs/guides/architecture/patterns.md @@ -42,5 +42,5 @@ This guide captures the key patterns introduced by Query Stack. Use it as the ca 1. **Version / capability gate** native execution. 2. **Respect ``stack_native_disabled`` driver feature** if provided manually (useful for integration tests). 3. **Never mutate stack operations**—always compile to driver-specific statements first. -4. **Preserve ``StackResult.raw_result``** when possible (call ``StackResult.from_sql_result`` / ``from_arrow_result``). +4. **Preserve ``StackResult.result``** when possible (call ``StackResult.from_sql_result`` / ``from_arrow_result``). 5. **Guarantee cleanup** (`commit()`/`rollback()` in `finally` blocks) even for native pipelines. 
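A short sketch of checklist items 1, 2, and 5 under stated assumptions (the
`_pipeline_supported` and `_execute_pipeline` helpers are hypothetical; only
the `stack_native_disabled` feature name comes from the docs above):

```python
def _use_native_stack(self) -> bool:
    # Items 1-2: manual driver_features toggle plus a capability gate.
    if self.driver_features.get("stack_native_disabled", False):
        return False
    return self._pipeline_supported()

def _run_native_stack(self, stack):
    try:
        results = self._execute_pipeline(stack)  # hypothetical native path
        self.connection.commit()
        return results
    except Exception:
        # Item 5: cleanup is guaranteed even when the pipeline raises.
        self.connection.rollback()
        raise
```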
diff --git a/docs/guides/performance/batch-execution.md b/docs/guides/performance/batch-execution.md index d3b9f1c3c..0030e593d 100644 --- a/docs/guides/performance/batch-execution.md +++ b/docs/guides/performance/batch-execution.md @@ -37,7 +37,7 @@ Query Stack complements (not replaces) ``execute_many``. Use this guide to choos Use the following structure when adding performance tests (see Task 6.6): ```python -from sqlspec.core import StatementStack +from sqlspec import StatementStack stack = ( StatementStack() diff --git a/docs/reference/query-stack.rst b/docs/reference/query-stack.rst index 03fb0c64c..82a504736 100644 --- a/docs/reference/query-stack.rst +++ b/docs/reference/query-stack.rst @@ -15,7 +15,7 @@ The stack system is composed of: - ``StatementStack`` – immutable builder with push helpers for execute/execute_many/execute_script/execute_arrow - ``StackOperation`` – the tuple-like value object stored inside the stack (method, statement, arguments, keyword arguments) -- ``StackResult`` – wraps the driver’s raw result while surfacing stack metadata (rowcount, warning, error) +- ``StackResult`` – wraps the driver’s raw result while surfacing stack metadata (rows_affected, warning, error) - ``AsyncDriverAdapterBase.execute_stack`` / ``SyncDriverAdapterBase.execute_stack`` – adapter hooks that select native pipelines or the sequential fallback StatementStack @@ -69,5 +69,5 @@ Usage Highlights - Build stacks once and reuse them across requests/tasks. - Call ``session.execute_stack(stack, continue_on_error=False)`` to run fail-fast or set ``continue_on_error=True`` to record per-operation errors. -- Inspect ``StackResult.raw_result`` to call helpers like ``all()``, ``one()``, ``to_pandas()``, or ``to_arrow()``. +- Inspect ``StackResult.result`` to call helpers like ``all()``, ``one()``, ``to_pandas()``, or ``to_arrow()``. - :doc:`/reference/adapters` lists per-adapter capabilities, including whether native pipelines or sequential fallback are used for stacks. diff --git a/specs/guides/query-stack.md b/specs/guides/query-stack.md index 700585475..ba59cf0d9 100644 --- a/specs/guides/query-stack.md +++ b/specs/guides/query-stack.md @@ -37,7 +37,7 @@ When using adapters with native pipelines (Oracle, psycopg, asyncpg), continue-o ## Arrow Operations -`push_execute_arrow()` delegates to `select_to_arrow()` when the adapter implements Arrow support (DuckDB, BigQuery, ADBC, etc.). The returned `StackResult.raw_result` is an `ArrowResult`, so downstream helpers like `to_pandas()` or `to_polars()` continue to work. +`push_execute_arrow()` delegates to `select_to_arrow()` when the adapter implements Arrow support (DuckDB, BigQuery, ADBC, etc.). The returned `StackResult.result` is an `ArrowResult`, so downstream helpers like `to_pandas()` or `to_polars()` continue to work. 
## Telemetry and Tracing @@ -54,7 +54,7 @@ Adapters only need to report whether they used a native pipeline; the observer h | Symptom | Cause | Fix | | --- | --- | --- | | `ValueError: Cannot execute an empty StatementStack` | Stack has zero operations | Ensure you push at least one statement before calling `execute_stack()` | -| `StackExecutionError(operation_index=1, ...)` | Driver error on a specific statement | Inspect `StackResult.error` to see the wrapped exception; use `StackResult.raw_result` to inspect partial data | +| `StackExecutionError(operation_index=1, ...)` | Driver error on a specific statement | Inspect `StackResult.error` to see the wrapped exception; use `StackResult.result` to inspect partial data | | `push_execute_many` raising `TypeError` | Parameter payload not a sequence | Pass an actual list/tuple of parameter sets | | Continue-on-error seems to run sequentially on psycopg | Psycopg pipeline mode does not support partial failures | Expected—SQLSpec downgrades to sequential mode automatically | diff --git a/sqlspec/adapters/asyncpg/driver.py b/sqlspec/adapters/asyncpg/driver.py index f67864eb4..6bc37c629 100644 --- a/sqlspec/adapters/asyncpg/driver.py +++ b/sqlspec/adapters/asyncpg/driver.py @@ -357,8 +357,8 @@ async def _run_operations(observer: StackExecutionObserver) -> None: if normalized is not None and self._can_prepare_stack_operation(normalized): stack_result = await self._execute_stack_operation_prepared(normalized) else: - raw_result = await self._execute_stack_operation(operation) - stack_result = StackResult(raw_result=raw_result) + result = await self._execute_stack_operation(operation) + stack_result = StackResult(result=result) except Exception as exc: stack_error = StackExecutionError( index, diff --git a/sqlspec/core/result.py b/sqlspec/core/result.py index 597303382..349fc9b7a 100644 --- a/sqlspec/core/result.py +++ b/sqlspec/core/result.py @@ -17,6 +17,7 @@ from typing_extensions import TypeVar from sqlspec.core.compiler import OperationType +from sqlspec.core.statement import SQL from sqlspec.storage import ( AsyncStoragePipeline, StorageDestination, @@ -28,11 +29,10 @@ from sqlspec.utils.schema import to_schema if TYPE_CHECKING: - from sqlspec.core.statement import SQL from sqlspec.typing import ArrowTable, PandasDataFrame, PolarsDataFrame, SchemaT -__all__ = ("ArrowResult", "SQLResult", "StackResult", "StatementResult") +__all__ = ("ArrowResult", "EmptyResult", "SQLResult", "StackResult", "StatementResult") T = TypeVar("T") @@ -878,39 +878,69 @@ def __iter__(self) -> "Iterator[dict[str, Any]]": yield from self.data.to_pylist() +class EmptyResult(StatementResult): + """Sentinel result used when a stack operation has no driver result.""" + + __slots__ = () + _EMPTY_STATEMENT = SQL("-- empty stack result --") + + def __init__(self) -> None: + super().__init__(statement=self._EMPTY_STATEMENT, data=[], rows_affected=0) + + def __iter__(self) -> Iterator[Any]: + return iter(()) + + def is_success(self) -> bool: + return True + + def get_data(self) -> list[Any]: + return [] + + class StackResult: - """Concrete stack result wrapper that preserves the original driver result.""" + """Wrapper for per-operation stack results that surfaces driver results directly.""" - __slots__ = ("error", "metadata", "raw_result", "rowcount", "warning") + __slots__ = ("error", "metadata", "result", "rows_affected", "warning") def __init__( self, - raw_result: "StatementResult | ArrowResult | None" = None, + result: "StatementResult | ArrowResult | None" = None, *, - 
rowcount: int | None = None, + rows_affected: int | None = None, error: Exception | None = None, warning: Any | None = None, metadata: "dict[str, Any] | None" = None, ) -> None: - self.raw_result = raw_result - self.rowcount = rowcount if rowcount is not None else _infer_rowcount(raw_result) + self.result: StatementResult | ArrowResult = result if result is not None else EmptyResult() + self.rows_affected = rows_affected if rows_affected is not None else _infer_rows_affected(self.result) self.error = error self.warning = warning self.metadata = dict(metadata) if metadata else None - def __iter__(self) -> "Iterator[Any]": - yield from self.rows + def get_result(self) -> "StatementResult | ArrowResult": + """Return the underlying driver result.""" + + return self.result @property - def rows(self) -> "tuple[Any, ...]": - """Return cached rows from the underlying result when available.""" + def result_type(self) -> str: + """Describe the underlying result type (SQL operation, Arrow, or custom).""" + + if isinstance(self.result, ArrowResult): + return "ARROW" + if isinstance(self.result, SQLResult): + return self.result.operation_type.upper() + return type(self.result).__name__.upper() + + def is_sql_result(self) -> bool: + """Return True when the underlying result is an SQLResult.""" + + return isinstance(self.result, StatementResult) and not isinstance(self.result, ArrowResult) + + def is_arrow_result(self) -> bool: + """Return True when the underlying result is an ArrowResult.""" - if self.raw_result is None: - return () - try: - return tuple(self.raw_result) - except TypeError: # pragma: no cover - defensive fallback - return () + return isinstance(self.result, ArrowResult) def is_error(self) -> bool: """Return True when the stack operation captured an error.""" @@ -921,8 +951,8 @@ def with_error(self, error: Exception) -> "StackResult": """Return a copy of the result that records the provided error.""" return StackResult( - raw_result=self.raw_result, - rowcount=self.rowcount, + result=self.result, + rows_affected=self.rows_affected, warning=self.warning, metadata=self.metadata, error=error, @@ -934,25 +964,23 @@ def from_sql_result(cls, result: "SQLResult") -> "StackResult": metadata = dict(result.metadata) if result.metadata else None warning = metadata.get("warning") if metadata else None - return cls(raw_result=result, rowcount=result.rows_affected, warning=warning, metadata=metadata) + return cls(result=result, rows_affected=result.rows_affected, warning=warning, metadata=metadata) @classmethod def from_arrow_result(cls, result: "ArrowResult") -> "StackResult": """Create a stack result from an ArrowResult instance.""" metadata = dict(result.metadata) if result.metadata else None - return cls(raw_result=result, rowcount=result.rows_affected, metadata=metadata) + return cls(result=result, rows_affected=result.rows_affected, metadata=metadata) @classmethod def from_error(cls, error: Exception) -> "StackResult": """Create an error-only stack result.""" - return cls(raw_result=None, rowcount=0, error=error) + return cls(result=EmptyResult(), rows_affected=0, error=error) -def _infer_rowcount(result: "StatementResult | ArrowResult | None") -> int: - if result is None: - return 0 +def _infer_rows_affected(result: "StatementResult | ArrowResult") -> int: rowcount = getattr(result, "rows_affected", None) return int(rowcount) if isinstance(rowcount, int) else 0 diff --git a/sqlspec/driver/_async.py b/sqlspec/driver/_async.py index 2620c64ab..0d7497a91 100644 --- a/sqlspec/driver/_async.py +++ 
b/sqlspec/driver/_async.py @@ -215,7 +215,7 @@ async def execute_stack( for index, operation in enumerate(stack.operations): try: - raw_result = await self._execute_stack_operation(operation) + result = await self._execute_stack_operation(operation) except Exception as exc: # pragma: no cover - exercised via tests stack_error = StackExecutionError( index, @@ -240,7 +240,7 @@ async def execute_stack( raise stack_error from exc - results.append(StackResult(raw_result=raw_result)) + results.append(StackResult(result=result)) if continue_on_error: await self._commit_after_stack_operation_async() diff --git a/sqlspec/driver/_sync.py b/sqlspec/driver/_sync.py index 015ccdebf..d748cbcbb 100644 --- a/sqlspec/driver/_sync.py +++ b/sqlspec/driver/_sync.py @@ -213,7 +213,7 @@ def execute_stack(self, stack: "StatementStack", *, continue_on_error: bool = Fa for index, operation in enumerate(stack.operations): try: - raw_result = self._execute_stack_operation(operation) + result = self._execute_stack_operation(operation) except Exception as exc: # pragma: no cover - exercised via tests stack_error = StackExecutionError( index, @@ -238,7 +238,7 @@ def execute_stack(self, stack: "StatementStack", *, continue_on_error: bool = Fa raise stack_error from exc - results.append(StackResult(raw_result=raw_result)) + results.append(StackResult(result=result)) if continue_on_error: self._commit_after_stack_operation() diff --git a/sqlspec/protocols.py b/sqlspec/protocols.py index f2a7971b5..65f80819a 100644 --- a/sqlspec/protocols.py +++ b/sqlspec/protocols.py @@ -488,13 +488,20 @@ def select_to_arrow( class StackResultProtocol(Protocol): """Protocol describing stack execution results.""" - raw_result: Any - rowcount: int + result: Any + rows_affected: int error: Exception | None warning: Any | None metadata: Mapping[str, Any] | None + result_type: str @property def rows(self) -> Sequence[Any]: ... def is_error(self) -> bool: ... + + def is_sql_result(self) -> bool: ... + + def is_arrow_result(self) -> bool: ... + + def get_result(self) -> Any: ... 
diff --git a/tests/integration/test_adapters/test_adbc/test_adbc_driver.py b/tests/integration/test_adapters/test_adbc/test_adbc_driver.py index a7a4f6401..9cde53ad3 100644 --- a/tests/integration/test_adapters/test_adbc/test_adbc_driver.py +++ b/tests/integration/test_adapters/test_adbc/test_adbc_driver.py @@ -4,8 +4,8 @@ import pytest +from sqlspec import SQLResult, StatementStack from sqlspec.adapters.adbc import AdbcDriver -from sqlspec.core import SQLResult, StatementStack from tests.integration.test_adapters.test_adbc.conftest import xfail_if_driver_missing ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] @@ -217,9 +217,9 @@ def test_adbc_postgresql_statement_stack_sequential(adbc_postgresql_session: Adb results = adbc_postgresql_session.execute_stack(stack) assert len(results) == 3 - assert results[2].raw_result is not None - assert results[2].raw_result.data is not None - assert results[2].raw_result.data[0]["total"] == 2 + assert results[2].result is not None + assert results[2].result.data is not None + assert results[2].result.data[0]["total"] == 2 @pytest.mark.xdist_group("postgres") diff --git a/tests/integration/test_adapters/test_aiosqlite/test_driver.py b/tests/integration/test_adapters/test_aiosqlite/test_driver.py index 300162342..f96c1620e 100644 --- a/tests/integration/test_adapters/test_aiosqlite/test_driver.py +++ b/tests/integration/test_adapters/test_aiosqlite/test_driver.py @@ -7,8 +7,8 @@ import pytest +from sqlspec import SQL, SQLResult, StatementStack from sqlspec.adapters.aiosqlite import AiosqliteDriver -from sqlspec.core import SQL, SQLResult, StatementStack pytestmark = pytest.mark.xdist_group("sqlite") ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] @@ -225,11 +225,11 @@ async def test_aiosqlite_statement_stack_sequential(aiosqlite_session: Aiosqlite results = await aiosqlite_session.execute_stack(stack) assert len(results) == 3 - assert results[0].rowcount == 1 - assert results[1].rowcount == 1 - assert results[2].raw_result is not None - assert results[2].raw_result.data is not None - assert results[2].raw_result.data[0]["total"] == 2 + assert results[0].rows_affected == 1 + assert results[1].rows_affected == 1 + assert results[2].result is not None + assert results[2].result.data is not None + assert results[2].result.data[0]["total"] == 2 async def test_aiosqlite_statement_stack_continue_on_error(aiosqlite_session: AiosqliteDriver) -> None: @@ -248,9 +248,9 @@ async def test_aiosqlite_statement_stack_continue_on_error(aiosqlite_session: Ai results = await aiosqlite_session.execute_stack(stack, continue_on_error=True) assert len(results) == 3 - assert results[0].rowcount == 1 + assert results[0].rows_affected == 1 assert results[1].error is not None - assert results[2].rowcount == 1 + assert results[2].rows_affected == 1 verify = await aiosqlite_session.execute("SELECT COUNT(*) AS total FROM test_table") assert verify.data is not None diff --git a/tests/integration/test_adapters/test_asyncmy/test_driver.py b/tests/integration/test_adapters/test_asyncmy/test_driver.py index 72ff1a1c3..a637cf6f5 100644 --- a/tests/integration/test_adapters/test_asyncmy/test_driver.py +++ b/tests/integration/test_adapters/test_asyncmy/test_driver.py @@ -11,8 +11,8 @@ import pytest from pytest_databases.docker.mysql import MySQLService +from sqlspec import SQL, SQLResult, StatementStack from sqlspec.adapters.asyncmy import AsyncmyConfig, AsyncmyDriver -from sqlspec.core import SQL, SQLResult, StatementStack from sqlspec.utils.serializers 
import from_json, to_json ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] @@ -178,9 +178,9 @@ async def test_asyncmy_statement_stack_sequential(asyncmy_driver: AsyncmyDriver) results = await asyncmy_driver.execute_stack(stack) assert len(results) == 3 - assert results[0].rowcount == 1 - assert results[1].rowcount == 1 - final_result = results[2].raw_result + assert results[0].rows_affected == 1 + assert results[1].rows_affected == 1 + final_result = results[2].result assert isinstance(final_result, SQLResult) data = final_result.get_data() assert data @@ -202,9 +202,9 @@ async def test_asyncmy_statement_stack_continue_on_error(asyncmy_driver: Asyncmy results = await asyncmy_driver.execute_stack(stack, continue_on_error=True) assert len(results) == 3 - assert results[0].rowcount == 1 + assert results[0].rows_affected == 1 assert results[1].error is not None - assert results[2].rowcount == 1 + assert results[2].rows_affected == 1 verify = await asyncmy_driver.execute("SELECT COUNT(*) AS total FROM test_table WHERE name LIKE ?", ("mysql-%",)) assert verify.get_data()[0]["total"] == 2 diff --git a/tests/integration/test_adapters/test_asyncpg/test_driver.py b/tests/integration/test_adapters/test_asyncpg/test_driver.py index 98c920695..ec6bbd592 100644 --- a/tests/integration/test_adapters/test_asyncpg/test_driver.py +++ b/tests/integration/test_adapters/test_asyncpg/test_driver.py @@ -6,8 +6,8 @@ import pytest from pytest_databases.docker.postgres import PostgresService +from sqlspec import SQLResult, StatementStack from sqlspec.adapters.asyncpg import AsyncpgConfig, AsyncpgDriver -from sqlspec.core import SQLResult, StatementStack ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] @@ -835,11 +835,11 @@ async def test_asyncpg_statement_stack_batch(asyncpg_session: AsyncpgDriver) -> results = await asyncpg_session.execute_stack(stack) assert len(results) == 3 - assert results[0].rowcount == 1 - assert results[1].rowcount == 1 - assert results[2].raw_result is not None - assert results[2].raw_result.data is not None - assert results[2].raw_result.data[0]["total_rows"] == 2 + assert results[0].rows_affected == 1 + assert results[1].rows_affected == 1 + assert results[2].result is not None + assert results[2].result.data is not None + assert results[2].result.data[0]["total_rows"] == 2 async def test_asyncpg_statement_stack_continue_on_error(asyncpg_session: AsyncpgDriver) -> None: @@ -857,9 +857,9 @@ async def test_asyncpg_statement_stack_continue_on_error(asyncpg_session: Asyncp results = await asyncpg_session.execute_stack(stack, continue_on_error=True) assert len(results) == 3 - assert results[0].rowcount == 1 + assert results[0].rows_affected == 1 assert results[1].error is not None - assert results[2].rowcount == 1 + assert results[2].rows_affected == 1 verify = await asyncpg_session.execute("SELECT COUNT(*) AS total FROM test_table") assert verify.data is not None diff --git a/tests/integration/test_adapters/test_bigquery/test_driver.py b/tests/integration/test_adapters/test_bigquery/test_driver.py index 802e08906..b94b0d27c 100644 --- a/tests/integration/test_adapters/test_bigquery/test_driver.py +++ b/tests/integration/test_adapters/test_bigquery/test_driver.py @@ -7,8 +7,8 @@ import pytest from pytest_databases.docker.bigquery import BigQueryService +from sqlspec import SQLResult, StatementStack from sqlspec.adapters.bigquery import BigQueryDriver -from sqlspec.core import SQLResult, StatementStack ParamStyle = Literal["tuple_binds", "dict_binds", 
"named_binds"] @@ -233,9 +233,9 @@ def test_bigquery_statement_stack_sequential(bigquery_session: BigQueryDriver, d results = bigquery_session.execute_stack(stack) assert len(results) == 3 - assert results[2].raw_result is not None - assert results[2].raw_result.data is not None - assert results[2].raw_result.data[0]["total"] == 2 + assert results[2].result is not None + assert results[2].result.data is not None + assert results[2].result.data[0]["total"] == 2 def test_bigquery_statement_stack_continue_on_error(bigquery_session: BigQueryDriver, driver_test_table: str) -> None: diff --git a/tests/integration/test_adapters/test_duckdb/test_driver.py b/tests/integration/test_adapters/test_duckdb/test_driver.py index 323e454d2..7bf530f89 100644 --- a/tests/integration/test_adapters/test_duckdb/test_driver.py +++ b/tests/integration/test_adapters/test_duckdb/test_driver.py @@ -5,8 +5,8 @@ import pytest +from sqlspec import SQLResult, StatementStack from sqlspec.adapters.duckdb import DuckDBDriver -from sqlspec.core import SQLResult, StatementStack pytestmark = pytest.mark.xdist_group("duckdb") @@ -620,11 +620,11 @@ def test_duckdb_statement_stack_sequential(duckdb_session: DuckDBDriver) -> None results = duckdb_session.execute_stack(stack) assert len(results) == 3 - assert results[0].rowcount == 1 - assert results[1].rowcount == 1 - assert results[2].raw_result is not None - assert results[2].raw_result.data is not None - assert results[2].raw_result.data[0]["total"] == 2 + assert results[0].rows_affected == 1 + assert results[1].rows_affected == 1 + assert results[2].result is not None + assert results[2].result.data is not None + assert results[2].result.data[0]["total"] == 2 def test_duckdb_statement_stack_continue_on_error(duckdb_session: DuckDBDriver) -> None: @@ -642,9 +642,9 @@ def test_duckdb_statement_stack_continue_on_error(duckdb_session: DuckDBDriver) results = duckdb_session.execute_stack(stack, continue_on_error=True) assert len(results) == 3 - assert results[0].rowcount == 1 + assert results[0].rows_affected == 1 assert results[1].error is not None - assert results[2].rowcount == 1 + assert results[2].rows_affected == 1 verify = duckdb_session.execute("SELECT COUNT(*) AS total FROM test_table") assert verify.data is not None diff --git a/tests/integration/test_adapters/test_oracledb/test_stack.py b/tests/integration/test_adapters/test_oracledb/test_stack.py index d56853d87..27f58e7bc 100644 --- a/tests/integration/test_adapters/test_oracledb/test_stack.py +++ b/tests/integration/test_adapters/test_oracledb/test_stack.py @@ -8,8 +8,8 @@ import pytest +from sqlspec import StackExecutionError, StatementStack from sqlspec.adapters.oracledb import OracleAsyncDriver, OracleSyncDriver -from sqlspec.core import StackExecutionError, StatementStack pytestmark = pytest.mark.xdist_group("oracle") @@ -77,11 +77,11 @@ async def tracking_execute_stack_native( assert call_counter["count"] == 1, "Native pipeline was not invoked" assert len(results) == 3 - assert results[0].rowcount == 1 - assert results[1].rowcount == 1 - assert results[2].raw_result is not None - assert results[2].raw_result.data is not None - assert results[2].raw_result.data[0]["name"] == "beta" + assert results[0].rows_affected == 1 + assert results[1].rows_affected == 1 + assert results[2].result is not None + assert results[2].result.data is not None + assert results[2].result.data[0]["name"] == "beta" await oracle_async_session.execute_script(DROP_TEMPLATE.format(table_name=table_name)) @@ -108,9 +108,9 @@ async def 
test_async_statement_stack_continue_on_error_pipeline(oracle_async_ses results = await oracle_async_session.execute_stack(stack, continue_on_error=True) assert len(results) == 3 - assert results[0].rowcount == 1 + assert results[0].rows_affected == 1 assert isinstance(results[1].error, StackExecutionError) - assert results[2].rowcount == 1 + assert results[2].rows_affected == 1 verify_result = await oracle_async_session.execute( f"SELECT COUNT(*) as total_rows FROM {table_name} WHERE id = :id", {"id": 2} @@ -136,9 +136,9 @@ def test_sync_statement_stack_sequential_fallback(oracle_sync_session: OracleSyn results = oracle_sync_session.execute_stack(stack) assert len(results) == 2 - assert results[0].rowcount == 1 - assert results[1].raw_result is not None - assert results[1].raw_result.data is not None - assert results[1].raw_result.data[0]["name"] == "sync-alpha" + assert results[0].rows_affected == 1 + assert results[1].result is not None + assert results[1].result.data is not None + assert results[1].result.data[0]["name"] == "sync-alpha" oracle_sync_session.execute_script(DROP_TEMPLATE.format(table_name=table_name)) diff --git a/tests/integration/test_adapters/test_psqlpy/test_driver.py b/tests/integration/test_adapters/test_psqlpy/test_driver.py index f21672f02..78e5f2720 100644 --- a/tests/integration/test_adapters/test_psqlpy/test_driver.py +++ b/tests/integration/test_adapters/test_psqlpy/test_driver.py @@ -6,8 +6,8 @@ import pytest +from sqlspec import SQL, SQLResult, StatementStack from sqlspec.adapters.psqlpy import PsqlpyDriver -from sqlspec.core import SQL, SQLResult, StatementStack if TYPE_CHECKING: pass diff --git a/tests/integration/test_adapters/test_psycopg/test_async_copy.py b/tests/integration/test_adapters/test_psycopg/test_async_copy.py index 3da2fc116..c36cd2354 100644 --- a/tests/integration/test_adapters/test_psycopg/test_async_copy.py +++ b/tests/integration/test_adapters/test_psycopg/test_async_copy.py @@ -7,8 +7,8 @@ import pytest from pytest_databases.docker.postgres import PostgresService +from sqlspec import SQLResult, StatementStack from sqlspec.adapters.psycopg import PsycopgAsyncConfig, PsycopgAsyncDriver -from sqlspec.core import SQLResult, StatementStack pytestmark = pytest.mark.xdist_group("postgres") @@ -194,9 +194,9 @@ async def test_psycopg_async_statement_stack_continue_on_error(psycopg_async_ses results = await psycopg_async_session.execute_stack(stack, continue_on_error=True) assert len(results) == 3 - assert results[0].rowcount == 1 + assert results[0].rows_affected == 1 assert results[1].error is not None - assert results[2].rowcount == 1 + assert results[2].rows_affected == 1 verify = await psycopg_async_session.execute("SELECT COUNT(*) AS total FROM test_table_async") assert verify.data is not None diff --git a/tests/integration/test_adapters/test_psycopg/test_driver.py b/tests/integration/test_adapters/test_psycopg/test_driver.py index 1ec2cbe99..d7b89a383 100644 --- a/tests/integration/test_adapters/test_psycopg/test_driver.py +++ b/tests/integration/test_adapters/test_psycopg/test_driver.py @@ -5,8 +5,8 @@ import pytest +from sqlspec import SQLResult, StatementStack from sqlspec.adapters.psycopg import PsycopgSyncConfig, PsycopgSyncDriver -from sqlspec.core import SQLResult, StatementStack ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] diff --git a/tests/integration/test_adapters/test_sqlite/test_driver.py b/tests/integration/test_adapters/test_sqlite/test_driver.py index f521b6146..0fc90d033 100644 --- 
a/tests/integration/test_adapters/test_sqlite/test_driver.py +++ b/tests/integration/test_adapters/test_sqlite/test_driver.py @@ -5,8 +5,8 @@ import pytest +from sqlspec import SQLResult, StatementStack from sqlspec.adapters.sqlite import SqliteDriver -from sqlspec.core import SQLResult, StatementStack pytestmark = pytest.mark.xdist_group("sqlite") ParamStyle = Literal["tuple_binds", "dict_binds", "named_binds"] @@ -221,11 +221,11 @@ def test_sqlite_statement_stack_sequential(sqlite_session: SqliteDriver) -> None results = sqlite_session.execute_stack(stack) assert len(results) == 3 - assert results[0].rowcount == 1 - assert results[1].rowcount == 1 - assert results[2].raw_result is not None - assert results[2].raw_result.data is not None - assert results[2].raw_result.data[0]["total"] == 2 + assert results[0].rows_affected == 1 + assert results[1].rows_affected == 1 + assert results[2].result is not None + assert results[2].result.data is not None + assert results[2].result.data[0]["total"] == 2 def test_sqlite_statement_stack_continue_on_error(sqlite_session: SqliteDriver) -> None: @@ -244,9 +244,9 @@ def test_sqlite_statement_stack_continue_on_error(sqlite_session: SqliteDriver) results = sqlite_session.execute_stack(stack, continue_on_error=True) assert len(results) == 3 - assert results[0].rowcount == 1 + assert results[0].rows_affected == 1 assert results[1].error is not None - assert results[2].rowcount == 1 + assert results[2].rows_affected == 1 verify = sqlite_session.execute("SELECT COUNT(*) AS total FROM test_table") assert verify.data is not None diff --git a/tests/integration/test_stack_edge_cases.py b/tests/integration/test_stack_edge_cases.py index bde083ae7..79bafc0c6 100644 --- a/tests/integration/test_stack_edge_cases.py +++ b/tests/integration/test_stack_edge_cases.py @@ -4,8 +4,8 @@ import pytest +from sqlspec import StatementStack from sqlspec.adapters.sqlite import SqliteConfig, SqliteDriver -from sqlspec.core import StatementStack from sqlspec.exceptions import StackExecutionError pytestmark = pytest.mark.xdist_group("sqlite") @@ -50,7 +50,7 @@ def test_single_operation_stack_matches_execute(sqlite_stack_session: "SqliteDri results = sqlite_stack_session.execute_stack(stack) assert len(results) == 1 - assert results[0].rowcount == 1 + assert results[0].rows_affected == 1 assert _table_count(sqlite_stack_session) == 1 @@ -68,8 +68,8 @@ def test_stack_with_only_select_operations(sqlite_stack_session: "SqliteDriver") results = sqlite_stack_session.execute_stack(stack) - first_result = results[0].raw_result - second_result = results[1].raw_result + first_result = results[0].result + second_result = results[1].result assert first_result is not None assert second_result is not None assert first_result.data is not None @@ -89,7 +89,7 @@ def test_large_stack_of_mixed_operations(sqlite_stack_session: "SqliteDriver") - results = sqlite_stack_session.execute_stack(stack) assert len(results) == 51 - final_result = results[-1].raw_result + final_result = results[-1].result assert final_result is not None assert final_result.data is not None assert final_result.data[0]["total"] == 50 @@ -135,7 +135,7 @@ def test_parameter_edge_cases(sqlite_stack_session: "SqliteDriver") -> None: ) results = sqlite_stack_session.execute_stack(stack) - third_result = results[2].raw_result + third_result = results[2].result assert third_result is not None assert third_result.data is not None assert third_result.data[0]["notes"] is None @@ -175,7 +175,7 @@ def 
test_stack_single_statement_selects_inside_existing_transaction(sqlite_stack stack = StatementStack().push_execute("SELECT name FROM stack_edge_table WHERE id = ?", (1,)) results = sqlite_stack_session.execute_stack(stack) - select_result = results[0].raw_result + select_result = results[0].result assert select_result is not None assert select_result.data is not None assert select_result.data[0]["name"] == "pre" diff --git a/tests/unit/test_adapters/test_oracledb/test_pipeline_helpers.py b/tests/unit/test_adapters/test_oracledb/test_pipeline_helpers.py index caf638dce..97baa1156 100644 --- a/tests/unit/test_adapters/test_oracledb/test_pipeline_helpers.py +++ b/tests/unit/test_adapters/test_oracledb/test_pipeline_helpers.py @@ -6,9 +6,9 @@ pytest.importorskip("oracledb") +from sqlspec import StatementStack from sqlspec.adapters.oracledb._types import OracleAsyncConnection from sqlspec.adapters.oracledb.driver import OracleAsyncDriver, oracledb_statement_config -from sqlspec.core import StatementStack from sqlspec.driver._common import StackExecutionObserver @@ -87,12 +87,12 @@ def test_pipeline_result_to_stack_result_uses_rowcount_attr() -> None: stack_result = driver._pipeline_result_to_stack_result(compiled, pipeline_result) - assert stack_result.rowcount == 7 + assert stack_result.rows_affected == 7 assert stack_result.warning == "warn" - raw_result = stack_result.raw_result - assert raw_result is not None - assert raw_result.metadata is not None - assert raw_result.metadata["pipeline_operation"] == "execute" + result = stack_result.result + assert result is not None + assert result.metadata is not None + assert result.metadata["pipeline_operation"] == "execute" def test_pipeline_result_execute_many_rowcount_fallback() -> None: @@ -103,7 +103,7 @@ def test_pipeline_result_execute_many_rowcount_fallback() -> None: stack_result = driver._pipeline_result_to_stack_result(compiled, pipeline_result) - assert stack_result.rowcount == 2 + assert stack_result.rows_affected == 2 def test_build_stack_results_records_errors() -> None: diff --git a/tests/unit/test_core/test_result.py b/tests/unit/test_core/test_result.py index 4d2419dcd..66413762a 100644 --- a/tests/unit/test_core/test_result.py +++ b/tests/unit/test_core/test_result.py @@ -206,25 +206,27 @@ def test_stack_result_from_sql_result() -> None: stack_result = StackResult.from_sql_result(sql_result) - assert stack_result.rowcount == 1 + assert stack_result.rows_affected == 1 assert stack_result.warning == "slow" - assert stack_result.raw_result is sql_result - assert list(stack_result.rows) == [{"id": 1}] + assert stack_result.result is sql_result + assert stack_result.get_result() is not None + assert stack_result.get_result().get_data() == [{"id": 1}] def test_stack_result_with_error_and_factory() -> None: sql_stmt = SQL("SELECT 1") sql_result = SQLResult(statement=sql_stmt, data=[{"value": 1}], rows_affected=1) - stack_result = StackResult(raw_result=sql_result) + stack_result = StackResult(result=sql_result) updated = stack_result.with_error(ValueError("boom")) assert updated.error is not None - assert updated.raw_result is sql_result - assert updated.rows == stack_result.rows + assert updated.result is sql_result + assert list(updated.get_result()) == list(stack_result.get_result()) failure = StackResult.from_error(RuntimeError("stack")) assert failure.is_error() - assert list(failure) == [] + assert failure.is_sql_result() + assert failure.get_result().get_data() == [] def test_sql_result_all_with_schema_type() -> None: diff --git 
a/tests/unit/test_core/test_stack.py b/tests/unit/test_core/test_stack.py index d3943c439..1579bf439 100644 --- a/tests/unit/test_core/test_stack.py +++ b/tests/unit/test_core/test_stack.py @@ -4,7 +4,7 @@ import pytest -from sqlspec.core import StackOperation, StatementConfig, StatementStack +from sqlspec import StackOperation, StatementConfig, StatementStack pytestmark = pytest.mark.xdist_group("core") diff --git a/tests/unit/test_driver/test_stack_base.py b/tests/unit/test_driver/test_stack_base.py index 5fe9a537c..24e1c10ac 100644 --- a/tests/unit/test_driver/test_stack_base.py +++ b/tests/unit/test_driver/test_stack_base.py @@ -63,7 +63,7 @@ async def fake_select_to_arrow(self, statement, *params, **kwargs): # type: ign results = await mock_async_driver.execute_stack(stack) assert len(results) == 1 - assert results[0].raw_result is sentinel + assert results[0].result is sentinel def test_sync_execute_stack_fail_fast_rolls_back(mock_sync_driver) -> None: @@ -118,4 +118,4 @@ def fake_select_to_arrow(self, statement, *params, **kwargs): # type: ignore[no results = mock_sync_driver.execute_stack(stack) assert len(results) == 1 - assert results[0].raw_result is sentinel + assert results[0].result is sentinel From bc3da8cb67422fec943d82922bd5f52b45d838c4 Mon Sep 17 00:00:00 2001 From: Cody Fincher Date: Fri, 14 Nov 2025 21:29:26 +0000 Subject: [PATCH 10/10] fix: update alembic version to 1.17.2 and adjust related package details --- uv.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/uv.lock b/uv.lock index af3809b40..2ae2406e9 100644 --- a/uv.lock +++ b/uv.lock @@ -343,7 +343,7 @@ wheels = [ [[package]] name = "alembic" -version = "1.17.1" +version = "1.17.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mako" }, @@ -351,9 +351,9 @@ dependencies = [ { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6e/b6/2a81d7724c0c124edc5ec7a167e85858b6fd31b9611c6fb8ecf617b7e2d3/alembic-1.17.1.tar.gz", hash = "sha256:8a289f6778262df31571d29cca4c7fbacd2f0f582ea0816f4c399b6da7528486", size = 1981285, upload-time = "2025-10-29T00:23:16.667Z" } +sdist = { url = "https://files.pythonhosted.org/packages/02/a6/74c8cadc2882977d80ad756a13857857dbcf9bd405bc80b662eb10651282/alembic-1.17.2.tar.gz", hash = "sha256:bbe9751705c5e0f14877f02d46c53d10885e377e3d90eda810a016f9baa19e8e", size = 1988064, upload-time = "2025-11-14T20:35:04.057Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a5/32/7df1d81ec2e50fb661944a35183d87e62d3f6c6d9f8aff64a4f245226d55/alembic-1.17.1-py3-none-any.whl", hash = "sha256:cbc2386e60f89608bb63f30d2d6cc66c7aaed1fe105bd862828600e5ad167023", size = 247848, upload-time = "2025-10-29T00:23:18.79Z" }, + { url = "https://files.pythonhosted.org/packages/ba/88/6237e97e3385b57b5f1528647addea5cc03d4d65d5979ab24327d41fb00d/alembic-1.17.2-py3-none-any.whl", hash = "sha256:f483dd1fe93f6c5d49217055e4d15b905b425b6af906746abb35b69c1996c4e6", size = 248554, upload-time = "2025-11-14T20:35:05.699Z" }, ] [[package]]
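Taken together, the test updates above pin down the renamed stack-result surface: rowcount becomes rows_affected, raw_result becomes result, and row data is also reachable through get_result(). A minimal usage sketch of that surface follows; the in-memory SqliteConfig arguments and the provide_session() context manager are assumptions made for illustration here, not interfaces confirmed by this patch series.

    from sqlspec import StatementStack
    from sqlspec.adapters.sqlite import SqliteConfig

    # Assumed setup: an in-memory SQLite config and its session provider.
    config = SqliteConfig(pool_config={"database": ":memory:"})

    with config.provide_session() as session:  # assumed session-provider API
        session.execute_script("CREATE TABLE demo (id INTEGER PRIMARY KEY, name TEXT)")
        stack = (
            StatementStack()
            .push_execute("INSERT INTO demo (name) VALUES (?)", ("alpha",))
            .push_execute("INSERT INTO demo (name) VALUES (?)", ("beta",))
            .push_execute("SELECT COUNT(*) AS total FROM demo")
        )
        results = session.execute_stack(stack)

        # Post-rename accessors, as exercised by the tests in this series:
        assert results[0].rows_affected == 1   # previously results[0].rowcount
        count_result = results[2].result       # previously results[2].raw_result
        assert count_result is not None
        assert count_result.data[0]["total"] == 2
        assert not results[0].is_error()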