diff --git a/CHANGELOG.md b/CHANGELOG.md
index 294892f6144..163cc9eebb9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 
 ### Changed
 - FlowDB now enables partitionwise aggregation planning by default
+- FlowDB now triggers an ANALYZE on newly created cache tables to generate statistics rather than waiting for autovacuum
 
 ### Fixed
 - Queries that have multiple of the same subquery with different parameters no longer cause duplicate scopes in tokens. [#6580](https://github.com/Flowminder/FlowKit/issues/6580)
diff --git a/flowmachine/flowmachine/core/cache.py b/flowmachine/flowmachine/core/cache.py
index cecd13aa0a8..12962c90101 100644
--- a/flowmachine/flowmachine/core/cache.py
+++ b/flowmachine/flowmachine/core/cache.py
@@ -135,6 +135,7 @@ def write_query_to_cache(
     ddl_ops_func: Callable[[str, str], List[str]],
     schema: Optional[str] = "cache",
     sleep_duration: Optional[int] = 1,
+    analyze=True,
 ) -> "Query":
     """
     Write a Query object into a postgres table and update the cache metadata about it.
@@ -159,6 +160,8 @@ def write_query_to_cache(
         Name of the schema to write to
     sleep_duration : int, default 1
         Number of seconds to wait between polls when monitoring a query being written from elsewhere
+    analyze : bool, default True
+        Set to False to _disable_ running analyze on the newly created table to generate statistics
 
     Returns
     -------
@@ -203,6 +206,10 @@ def write_query_to_cache(
             except Exception as exc:
                 logger.error(f"Error executing SQL. Error was {exc}")
                 raise exc
+            if analyze:
+                logger.debug(f"Running analyze for {schema}.{name}.")
+                trans.exec_driver_sql(f"ANALYZE {schema}.{name};")
+                logger.debug(f"Ran analyze for {schema}.{name}.")
             if schema == "cache":
                 try:
                     write_cache_metadata(