exception cleanup (#6347)
* starting to move jinja exceptions

* convert some exceptions

* add back old functions for backward compatibility

* organize

* more conversions

* more conversions

* add changelog

* split out CacheInconsistency

* more conversions

* convert even more

* convert parsingexceptions

* fix tests

* more conversions

* more conversions

* finish converting exception functions

* convert more tests

* standardize to msg

* remove some TODOs

* fix test param and check the rest

* add comment, move exceptions

* add types

* fix type errors

* fix type for adapter_response

* remove 0.13 version from message
emmyoop committed Dec 20, 2022
1 parent b9bdb77 commit 304797b
Showing 45 changed files with 2,349 additions and 1,074 deletions.
9 changes: 9 additions & 0 deletions .changes/unreleased/Breaking Changes-20221205-141937.yaml
@@ -0,0 +1,9 @@
+kind: Breaking Changes
+body: Cleaned up exceptions to directly raise in code. Removed use of all exception
+  functions in the code base and marked them all as deprecated to be removed next
+  minor release.
+time: 2022-12-05T14:19:37.863032-06:00
+custom:
+  Author: emmyoop
+  Issue: "6339"
+  PR: "6347"
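The pattern the changelog (and the commit list above) describes looks roughly like the sketch below: each message-formatting helper becomes an exception class that is raised directly, and the old helper survives one more minor release as a deprecated wrapper. The class and helper names are taken from this diff; the base class and the deprecation mechanics are assumptions, not dbt's actual `dbt/exceptions.py`.

```python
import warnings


class CompilationException(Exception):
    """Stand-in for dbt's compiler-error base class (assumed; the real one lives in dbt.exceptions)."""


class NullRelationCacheAttempted(CompilationException):
    """New-style exception: message formatting moves into the class.

    The message text is taken from the removed raise_compiler_error call in
    core/dbt/adapters/base/impl.py below.
    """

    def __init__(self, name: str):
        self.name = name
        super().__init__(f"Attempted to cache a null relation for {name}")


def raise_compiler_error(msg: str):
    """Old-style helper kept only for backward compatibility (shape assumed)."""
    warnings.warn(
        "raise_compiler_error() is deprecated and will be removed in the next minor release; "
        "raise a specific exception class instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    raise CompilationException(msg)
```

Call sites then change from `raise_compiler_error("Attempted to cache a null relation for {}".format(name))` to `raise NullRelationCacheAttempted(name)`, which is the shape of every hunk below.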
70 changes: 26 additions & 44 deletions core/dbt/adapters/base/impl.py
@@ -22,13 +22,20 @@
import pytz

from dbt.exceptions import (
-raise_database_error,
-raise_compiler_error,
-invalid_type_error,
-get_relation_returned_multiple_results,
InternalException,
+InvalidMacroArgType,
+InvalidMacroResult,
+InvalidQuoteConfigType,
NotImplementedException,
+NullRelationCacheAttempted,
+NullRelationDropAttempted,
+RelationReturnedMultipleResults,
+RenameToNoneAttempted,
RuntimeException,
+SnapshotTargetIncomplete,
+SnapshotTargetNotSnapshotTable,
+UnexpectedNull,
+UnexpectedNonTimestamp,
)

from dbt.adapters.protocol import (
@@ -97,18 +104,10 @@ def _utc(dt: Optional[datetime], source: BaseRelation, field_name: str) -> datet
assume the datetime is already for UTC and add the timezone.
"""
if dt is None:
-raise raise_database_error(
-"Expected a non-null value when querying field '{}' of table "
-" {} but received value 'null' instead".format(field_name, source)
-)
+raise UnexpectedNull(field_name, source)

elif not hasattr(dt, "tzinfo"):
-raise raise_database_error(
-"Expected a timestamp value when querying field '{}' of table "
-"{} but received value of type '{}' instead".format(
-field_name, source, type(dt).__name__
-)
-)
+raise UnexpectedNonTimestamp(field_name, source, dt)

elif dt.tzinfo:
return dt.astimezone(pytz.UTC)
@@ -434,7 +433,7 @@ def cache_added(self, relation: Optional[BaseRelation]) -> str:
"""Cache a new relation in dbt. It will show up in `list relations`."""
if relation is None:
name = self.nice_connection_name()
raise_compiler_error("Attempted to cache a null relation for {}".format(name))
raise NullRelationCacheAttempted(name)
self.cache.add(relation)
# so jinja doesn't render things
return ""
@@ -446,7 +445,7 @@ def cache_dropped(self, relation: Optional[BaseRelation]) -> str:
"""
if relation is None:
name = self.nice_connection_name()
raise_compiler_error("Attempted to drop a null relation for {}".format(name))
raise NullRelationDropAttempted(name)
self.cache.drop(relation)
return ""

@@ -463,9 +462,7 @@ def cache_renamed(
name = self.nice_connection_name()
src_name = _relation_name(from_relation)
dst_name = _relation_name(to_relation)
-raise_compiler_error(
-"Attempted to rename {} to {} for {}".format(src_name, dst_name, name)
-)
+raise RenameToNoneAttempted(src_name, dst_name, name)

self.cache.rename(from_relation, to_relation)
return ""
@@ -615,15 +612,15 @@ def get_missing_columns(
to_relation.
"""
if not isinstance(from_relation, self.Relation):
-invalid_type_error(
+raise InvalidMacroArgType(
method_name="get_missing_columns",
arg_name="from_relation",
got_value=from_relation,
expected_type=self.Relation,
)

if not isinstance(to_relation, self.Relation):
-invalid_type_error(
+raise InvalidMacroArgType(
method_name="get_missing_columns",
arg_name="to_relation",
got_value=to_relation,
@@ -648,7 +645,7 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None:
incorrect.
"""
if not isinstance(relation, self.Relation):
-invalid_type_error(
+raise InvalidMacroArgType(
method_name="valid_snapshot_target",
arg_name="relation",
got_value=relation,
@@ -669,32 +666,24 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None:

if missing:
if extra:
-msg = (
-'Snapshot target has ("{}") but not ("{}") - is it an '
-"unmigrated previous version archive?".format(
-'", "'.join(extra), '", "'.join(missing)
-)
-)
+raise SnapshotTargetIncomplete(extra, missing)
else:
-msg = 'Snapshot target is not a snapshot table (missing "{}")'.format(
-'", "'.join(missing)
-)
-raise_compiler_error(msg)
+raise SnapshotTargetNotSnapshotTable(missing)

@available.parse_none
def expand_target_column_types(
self, from_relation: BaseRelation, to_relation: BaseRelation
) -> None:
if not isinstance(from_relation, self.Relation):
-invalid_type_error(
+raise InvalidMacroArgType(
method_name="expand_target_column_types",
arg_name="from_relation",
got_value=from_relation,
expected_type=self.Relation,
)

if not isinstance(to_relation, self.Relation):
-invalid_type_error(
+raise InvalidMacroArgType(
method_name="expand_target_column_types",
arg_name="to_relation",
got_value=to_relation,
@@ -776,7 +765,7 @@ def get_relation(self, database: str, schema: str, identifier: str) -> Optional[
"schema": schema,
"database": database,
}
-get_relation_returned_multiple_results(kwargs, matches)
+raise RelationReturnedMultipleResults(kwargs, matches)

elif matches:
return matches[0]
@@ -840,10 +829,7 @@ def quote_seed_column(self, column: str, quote_config: Optional[bool]) -> str:
elif quote_config is None:
pass
else:
-raise_compiler_error(
-f'The seed configuration value of "quote_columns" has an '
-f"invalid type {type(quote_config)}"
-)
+raise InvalidQuoteConfigType(quote_config)

if quote_columns:
return self.quote(column)
@@ -1093,11 +1079,7 @@ def calculate_freshness(
# now we have a 1-row table of the maximum `loaded_at_field` value and
# the current time according to the db.
if len(table) != 1 or len(table[0]) != 2:
-raise_compiler_error(
-'Got an invalid result from "{}" macro: {}'.format(
-FRESHNESS_MACRO_NAME, [tuple(r) for r in table]
-)
-)
+raise InvalidMacroResult(FRESHNESS_MACRO_NAME, table)
if table[0][0] is None:
# no records in the table, so really the max_loaded_at was
# infinitely long ago. Just call it 0:00 January 1 year UTC
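The new classes used above are defined in `dbt/exceptions.py`, which is not part of the hunks shown on this page. Below is a minimal sketch of the two timestamp-related classes raised from `_utc`, reusing the exact message text from the removed `raise_database_error` calls; the `DatabaseException` base class is an assumption.

```python
from typing import Any


class DatabaseException(Exception):
    """Stand-in base class (assumed); dbt's real database-error base lives in dbt.exceptions."""


class UnexpectedNull(DatabaseException):
    """Same message the removed raise_database_error call in _utc built inline."""

    def __init__(self, field_name: str, source: Any):
        self.field_name = field_name
        self.source = source
        super().__init__(
            f"Expected a non-null value when querying field '{field_name}' of table "
            f" {source} but received value 'null' instead"
        )


class UnexpectedNonTimestamp(DatabaseException):
    """Same message as the removed non-timestamp branch, keeping the offending value on the exception."""

    def __init__(self, field_name: str, source: Any, dt: Any):
        self.field_name = field_name
        self.source = source
        self.dt = dt
        super().__init__(
            f"Expected a timestamp value when querying field '{field_name}' of table "
            f"{source} but received value of type '{type(dt).__name__}' instead"
        )
```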
6 changes: 3 additions & 3 deletions core/dbt/adapters/base/relation.py
@@ -11,7 +11,7 @@
Policy,
Path,
)
-from dbt.exceptions import InternalException
+from dbt.exceptions import ApproximateMatch, InternalException, MultipleDatabasesNotAllowed
from dbt.node_types import NodeType
from dbt.utils import filter_null_values, deep_merge, classproperty

@@ -100,7 +100,7 @@ def matches(

if approximate_match and not exact_match:
target = self.create(database=database, schema=schema, identifier=identifier)
-dbt.exceptions.approximate_relation_match(target, self)
+raise ApproximateMatch(target, self)

return exact_match

@@ -438,7 +438,7 @@ def flatten(self, allow_multiple_databases: bool = False):
if not allow_multiple_databases:
seen = {r.database.lower() for r in self if r.database}
if len(seen) > 1:
-dbt.exceptions.raise_compiler_error(str(seen))
+raise MultipleDatabasesNotAllowed(seen)

for information_schema_name, schema in self.search():
path = {"database": information_schema_name.database, "schema": schema}
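`ApproximateMatch` and `MultipleDatabasesNotAllowed` are only raised here, not defined, and the old helpers' message text is not visible in these hunks. A sketch of the shape implied by the call site `raise ApproximateMatch(target, self)`, with placeholder wording, followed by the kind of type-based handling that direct raises make possible:

```python
class ApproximateMatch(Exception):
    """Raised when a relation search finds only an approximate (e.g. case-insensitive) match.

    Constructor arguments follow the call site in BaseRelation.matches above;
    the base class and message wording here are placeholders, not dbt's implementation.
    """

    def __init__(self, target, relation):
        self.target = target
        self.relation = relation
        super().__init__(
            f"Found an approximate match for {target}, but not an exact one: {relation}"
        )


# Callers can now branch on the exception type instead of string-matching a
# generic compiler error.
try:
    raise ApproximateMatch(target="analytics.my_model", relation="ANALYTICS.MY_MODEL")
except ApproximateMatch as exc:
    print(f"searched for {exc.target}, found {exc.relation}")
```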
46 changes: 12 additions & 34 deletions core/dbt/adapters/cache.py
@@ -1,4 +1,3 @@
-import re
import threading
from copy import deepcopy
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
@@ -9,7 +8,13 @@
_make_msg_from_ref_key,
_ReferenceKey,
)
-import dbt.exceptions
+from dbt.exceptions import (
+DependentLinkNotCached,
+NewNameAlreadyInCache,
+NoneRelationFound,
+ReferencedLinkNotCached,
+TruncatedModelNameCausedCollision,
+)
from dbt.events.functions import fire_event, fire_event_if
from dbt.events.types import (
AddLink,
@@ -150,11 +155,7 @@ def rename_key(self, old_key, new_key):
:raises InternalError: If the new key already exists.
"""
if new_key in self.referenced_by:
-dbt.exceptions.raise_cache_inconsistent(
-'in rename of "{}" -> "{}", new name is in the cache already'.format(
-old_key, new_key
-)
-)
+raise NewNameAlreadyInCache(old_key, new_key)

if old_key not in self.referenced_by:
return
@@ -270,15 +271,11 @@ def _add_link(self, referenced_key, dependent_key):
if referenced is None:
return
if referenced is None:
-dbt.exceptions.raise_cache_inconsistent(
-"in add_link, referenced link key {} not in cache!".format(referenced_key)
-)
+raise ReferencedLinkNotCached(referenced_key)

dependent = self.relations.get(dependent_key)
if dependent is None:
-dbt.exceptions.raise_cache_inconsistent(
-"in add_link, dependent link key {} not in cache!".format(dependent_key)
-)
+raise DependentLinkNotCached(dependent_key)

assert dependent is not None # we just raised!

@@ -430,24 +427,7 @@ def _check_rename_constraints(self, old_key, new_key):
if new_key in self.relations:
# Tell user when collision caused by model names truncated during
# materialization.
-match = re.search("__dbt_backup|__dbt_tmp$", new_key.identifier)
-if match:
-truncated_model_name_prefix = new_key.identifier[: match.start()]
-message_addendum = (
-"\n\nName collisions can occur when the length of two "
-"models' names approach your database's builtin limit. "
-"Try restructuring your project such that no two models "
-"share the prefix '{}'.".format(truncated_model_name_prefix)
-+ " Then, clean your warehouse of any removed models."
-)
-else:
-message_addendum = ""
-
-dbt.exceptions.raise_cache_inconsistent(
-"in rename, new key {} already in cache: {}{}".format(
-new_key, list(self.relations.keys()), message_addendum
-)
-)
+raise TruncatedModelNameCausedCollision(new_key, self.relations)

if old_key not in self.relations:
fire_event(TemporaryRelation(key=_make_msg_from_ref_key(old_key)))
@@ -505,9 +485,7 @@ def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[
]

if None in results:
-dbt.exceptions.raise_cache_inconsistent(
-"in get_relations, a None relation was found in the cache!"
-)
+raise NoneRelationFound()
return results

def clear(self):
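The largest inlined message in this file, the truncated-model-name collision hint, moves wholesale into `TruncatedModelNameCausedCollision`. Here is a sketch of how that class could rebuild the same string from the call-site arguments (`new_key`, `self.relations`); the `CacheInconsistency` base named in the commit list above is stubbed because its definition is not shown on this page.

```python
import re
from typing import Any, Dict


class CacheInconsistency(Exception):
    """Stub for the cache-inconsistency base split out in this PR (definition not shown here)."""


class TruncatedModelNameCausedCollision(CacheInconsistency):
    """Rebuilds the message the removed block in _check_rename_constraints formatted inline."""

    def __init__(self, new_key: Any, relations: Dict[Any, Any]):
        self.new_key = new_key
        self.relations = relations

        match = re.search("__dbt_backup|__dbt_tmp$", new_key.identifier)
        if match:
            truncated_model_name_prefix = new_key.identifier[: match.start()]
            message_addendum = (
                "\n\nName collisions can occur when the length of two "
                "models' names approach your database's builtin limit. "
                "Try restructuring your project such that no two models "
                "share the prefix '{}'.".format(truncated_model_name_prefix)
                + " Then, clean your warehouse of any removed models."
            )
        else:
            message_addendum = ""

        super().__init__(
            "in rename, new key {} already in cache: {}{}".format(
                new_key, list(relations.keys()), message_addendum
            )
        )
```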
7 changes: 2 additions & 5 deletions core/dbt/adapters/sql/impl.py
@@ -1,9 +1,8 @@
import agate
from typing import Any, Optional, Tuple, Type, List

import dbt.clients.agate_helper
from dbt.contracts.connection import Connection
-import dbt.exceptions
+from dbt.exceptions import RelationTypeNull
from dbt.adapters.base import BaseAdapter, available
from dbt.adapters.cache import _make_ref_key_msg
from dbt.adapters.sql import SQLConnectionManager
@@ -132,9 +131,7 @@ def alter_column_type(self, relation, column_name, new_column_type) -> None:

def drop_relation(self, relation):
if relation.type is None:
-dbt.exceptions.raise_compiler_error(
-"Tried to drop relation {}, but its type is null.".format(relation)
-)
+raise RelationTypeNull(relation)

self.cache_dropped(relation)
self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation})
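`RelationTypeNull` follows the same pattern; its message is visible in the removed lines above. A sketch of the class plus the kind of type-based assertion the "convert more tests" commits point at (illustrative only, assuming pytest; not a test from this PR):

```python
import pytest


class RelationTypeNull(Exception):
    """Carries the message the removed raise_compiler_error call in drop_relation formatted inline.

    The base class is assumed; dbt's real class lives in dbt.exceptions.
    """

    def __init__(self, relation):
        self.relation = relation
        super().__init__(f"Tried to drop relation {relation}, but its type is null.")


def test_drop_relation_with_null_type_raises():
    # Stand-in for calling adapter.drop_relation() on a relation whose type is None.
    with pytest.raises(RelationTypeNull):
        raise RelationTypeNull("my_db.my_schema.my_table")
```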