diff --git a/flixOpt/__init__.py b/flixOpt/__init__.py index 4b469173b..856997201 100644 --- a/flixOpt/__init__.py +++ b/flixOpt/__init__.py @@ -3,4 +3,4 @@ """ from .commons import * -setup_logging('INFO', use_rich_handler=False) +CONFIG.load_config() diff --git a/flixOpt/commons.py b/flixOpt/commons.py index c01209b56..4d9717077 100644 --- a/flixOpt/commons.py +++ b/flixOpt/commons.py @@ -2,7 +2,8 @@ This module makes the commonly used classes and functions available in the flixOpt framework. """ -from .core import setup_logging, change_logging_level, TimeSeriesData +from .core import TimeSeriesData +from .config import CONFIG, change_logging_level from .elements import Flow, Bus from .effects import Effect diff --git a/flixOpt/components.py b/flixOpt/components.py index 0eac4fb22..adb4ca26a 100644 --- a/flixOpt/components.py +++ b/flixOpt/components.py @@ -9,7 +9,7 @@ from . import utils from .elements import Flow, _create_time_series -from .core import Skalar, Numeric_TS, TimeSeries, Numeric +from .core import Skalar, Numeric, Numeric_TS, TimeSeries from .math_modeling import VariableTS, Equation from .features import OnOffModel, MultipleSegmentsModel, InvestmentModel from .structure import SystemModel, create_equation, create_variable diff --git a/flixOpt/config.py b/flixOpt/config.py new file mode 100644 index 000000000..8ee824f0c --- /dev/null +++ b/flixOpt/config.py @@ -0,0 +1,248 @@ +import os +import types +from typing import Optional, Literal, Annotated +import logging +from dataclasses import dataclass, is_dataclass, fields + +import yaml +from rich.logging import RichHandler +from rich.console import Console + +logger = logging.getLogger('flixOpt') + + +def merge_configs(defaults: dict, overrides: dict) -> dict: + """ + Merge the default configuration with user-provided overrides. + + :param defaults: Default configuration dictionary. + :param overrides: User configuration dictionary. + :return: Merged configuration dictionary. 
+ """ + for key, value in overrides.items(): + if isinstance(value, dict) and key in defaults and isinstance(defaults[key], dict): + # Recursively merge nested dictionaries + defaults[key] = merge_configs(defaults[key], value) + else: + # Override the default value + defaults[key] = value + return defaults + + +def dataclass_from_dict_with_validation(cls, data: dict): + """ + Recursively initialize a dataclass from a dictionary. + """ + if not is_dataclass(cls): + raise TypeError(f"{cls} must be a dataclass") + + # Build kwargs for the dataclass constructor + kwargs = {} + for field in fields(cls): + field_name = field.name + field_type = field.type + field_value = data.get(field_name) + + # If the field type is a dataclass and the value is a dict, recursively initialize + if is_dataclass(field_type) and isinstance(field_value, dict): + kwargs[field_name] = dataclass_from_dict_with_validation(field_type, field_value) + else: + kwargs[field_name] = field_value # Pass as-is if no special handling is needed + + return cls(**kwargs) + + +@dataclass() +class ValidatedConfig: + def __setattr__(self, name, value): + if field := self.__dataclass_fields__.get(name): + if metadata := getattr(field.type, '__metadata__', None): + assert metadata[0](value), f'Invalid value passed to {name!r}: {value=}' + super().__setattr__(name, value) + + +@dataclass +class LoggingConfig(ValidatedConfig): + level: Annotated[Literal['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], lambda level: level in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']] + file: Annotated[str, lambda file: isinstance(file, str)] + rich: Annotated[bool, lambda rich: isinstance(rich, bool)] + + +@dataclass +class ModelingConfig(ValidatedConfig): + BIG: Annotated[int, lambda x: isinstance(x, int)] + EPSILON: Annotated[float, lambda x: isinstance(x, float)] + BIG_BINARY_BOUND: Annotated[int, lambda x: isinstance(x, int)] + + +@dataclass +class ConfigSchema(ValidatedConfig): + config_name: Annotated[str, lambda x: 
isinstance(x, str)] + logging: LoggingConfig + modeling: ModelingConfig + + +class CONFIG: + """ + A configuration class that stores global configuration values as class attributes. + """ + config_name: str = None + modeling: ModelingConfig = None + logging: LoggingConfig = None + + @classmethod + def load_config(cls, user_config_file: Optional[str] = None): + """ + Initialize configuration using defaults or user-specified file. + """ + # Default config file + default_config_path = os.path.join(os.path.dirname(__file__), "config.yaml") + + if user_config_file is None: + with open(default_config_path, "r") as file: + new_config = yaml.safe_load(file) + elif not os.path.exists(user_config_file): + raise FileNotFoundError(f"Config file not found: {user_config_file}") + else: + with open(user_config_file, "r") as user_file: + new_config = yaml.safe_load(user_file) + + # Convert the merged config to ConfigSchema + config_data = dataclass_from_dict_with_validation(ConfigSchema, new_config) + + # Store the configuration in the class as class attributes + cls.logging = config_data.logging + cls.modeling = config_data.modeling + cls.config_name = config_data.config_name + + setup_logging(default_level=cls.logging.level, + log_file=cls.logging.file, + use_rich_handler=cls.logging.rich) + + @classmethod + def to_dict(cls): + """ + Convert the configuration class into a dictionary for JSON serialization. + Handles dataclasses and simple types like str, int, etc. + """ + config_dict = {} + for attribute, value in cls.__dict__.items(): + # Only consider attributes (not methods, etc.) + if not attribute.startswith("_") and not isinstance(value, (types.FunctionType, types.MethodType)) and not isinstance(value, classmethod): + if is_dataclass(value): + config_dict[attribute] = value.__dict__ + else: # Assuming only basic types here! 
+ config_dict[attribute] = value + + return config_dict + + +class MultilineFormater(logging.Formatter): + + def format(self, record): + message_lines = record.getMessage().split('\n') + + # Prepare the log prefix (timestamp + log level) + timestamp = self.formatTime(record, self.datefmt) + log_level = record.levelname.ljust(8) # Align log levels for consistency + log_prefix = f"{timestamp} | {log_level} |" + + # Format all lines + first_line = [f'{log_prefix} {message_lines[0]}'] + if len(message_lines) > 1: + lines = first_line + [f"{log_prefix} {line}" for line in message_lines[1:]] + else: + lines = first_line + + return '\n'.join(lines) + + +class ColoredMultilineFormater(MultilineFormater): + # ANSI escape codes for colors + COLORS = { + 'DEBUG': '\033[32m', # Green + 'INFO': '\033[34m', # Blue + 'WARNING': '\033[33m', # Yellow + 'ERROR': '\033[31m', # Red + 'CRITICAL': '\033[1m\033[31m', # Bold Red + } + RESET = '\033[0m' + + def format(self, record): + lines = super().format(record).splitlines() + log_color = self.COLORS.get(record.levelname, self.RESET) + + # Create a formatted message for each line separately + formatted_lines = [] + for line in lines: + formatted_lines.append(f"{log_color}{line}{self.RESET}") + + return '\n'.join(formatted_lines) + + +def _get_logging_handler(log_file: Optional[str] = None, + use_rich_handler: bool = False) -> logging.Handler: + """Returns a logging handler for the given log file.""" + if use_rich_handler and log_file is None: + # RichHandler for console output + console = Console(width=120) + rich_handler = RichHandler( + console=console, + rich_tracebacks=True, + omit_repeated_times=True, + show_path=False, + log_time_format="%Y-%m-%d %H:%M:%S", + ) + rich_handler.setFormatter(logging.Formatter("%(message)s")) # Simplified formatting + + return rich_handler + elif log_file is None: + # Regular Logger with custom formating enabled + file_handler = logging.StreamHandler() + 
file_handler.setFormatter(ColoredMultilineFormater( + fmt="%(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + )) + return file_handler + else: + # FileHandler for file output + file_handler = logging.FileHandler(log_file) + file_handler.setFormatter(MultilineFormater( + fmt="%(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + )) + return file_handler + + +def setup_logging(default_level: Literal['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'] = 'INFO', + log_file: Optional[str] = 'flixOpt.log', + use_rich_handler: bool = False): + """Setup logging configuration""" + logger = logging.getLogger('flixOpt') # Use a specific logger name for your package + logger.setLevel(get_logging_level_by_name(default_level)) + # Clear existing handlers + if logger.hasHandlers(): + logger.handlers.clear() + + logger.addHandler(_get_logging_handler(use_rich_handler=use_rich_handler)) + if log_file is not None: + logger.addHandler(_get_logging_handler(log_file, use_rich_handler=False)) + + return logger + + +def get_logging_level_by_name(level_name: Literal['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']) -> int: + possible_logging_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'] + if level_name.upper() not in possible_logging_levels: + raise ValueError(f'Invalid logging level {level_name}') + else: + logging_level = getattr(logging, level_name.upper(), logging.WARNING) + return logging_level + + +def change_logging_level(level_name: Literal['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']): + logger = logging.getLogger('flixOpt') + logging_level = get_logging_level_by_name(level_name) + logger.setLevel(logging_level) + for handler in logger.handlers: + handler.setLevel(logging_level) diff --git a/flixOpt/config.yaml b/flixOpt/config.yaml new file mode 100644 index 000000000..cd2d649ae --- /dev/null +++ b/flixOpt/config.yaml @@ -0,0 +1,10 @@ +# Default configuration of flixOpt +config_name: flixOpt # Name of the config file. This has no effect on the configuration itself. 
+logging: + level: INFO + file: flixOpt.log + rich: false # logging output is formatted using rich. This is only advisable when using a proper terminal +modeling: + BIG: 10000000 # 1e notation not possible in yaml + EPSILON: 0.00001 + BIG_BINARY_BOUND: 100000 diff --git a/flixOpt/core.py b/flixOpt/core.py index 3c3baffa6..5452fab29 100644 --- a/flixOpt/core.py +++ b/flixOpt/core.py @@ -3,13 +3,11 @@ It provides Datatypes, logging functionality, and some functions to transform data structures. """ -from typing import Union, Optional, List, Dict, Any, Literal +from typing import Union, Optional, List, Dict, Any import logging import inspect import numpy as np -from rich.logging import RichHandler -from rich.console import Console from . import utils @@ -17,57 +15,6 @@ Skalar = Union[int, float] # Datatype Numeric = Union[int, float, np.ndarray] # Datatype -# zeitreihenbezogene Input-Daten: -Numeric_TS = Union[Skalar, np.ndarray, 'TimeSeries'] -# Datatype Numeric_TS: -# Skalar --> wird später dann in array ("Zeitreihe" mit length=nrOfTimeIndexe) übersetzt -# np.ndarray --> muss length=nrOfTimeIndexe haben ("Zeitreihe") -# TimeSeriesData --> wie obige aber zusätzliche Übergabe aggWeight (für Aggregation) - - -class Config: - """ - Configuration class for global settings. - The values are used as defaults in several classes. - They can be overwritten by the user via the .update() - method. - Use with care, and make sure to adjust them in the beginning of the script. - """ - BIG_M: Union[int, float] = 1e7 - EPSILON: Union[int, float] = 1e-5 - OFFSET_TO_BIG_M: Union[int, float] = 100 - BIG_BINARY_BOUND: Union[int, float] = BIG_M / OFFSET_TO_BIG_M - - @classmethod - def update(cls, big_m: Optional[int] = None, - epsilon: Optional[float] = None, - offset_to_big_m: Optional[int] = None, - big_binary_bound: Optional[int] = None) -> None: - """ - Update the configuration with the given values. 
- ----- - Parameters - ----------- - big_m: int, optional - The value of the big M constant. Defaults to 1e7. - epsilon: float, optional - The value of the epsilon constant. Defaults to 1e-5. - offset_to_big_m: int, optional - The value of the offset to big M constant for the big binary bound. Defaults to 100. - big_binary_bound: int, optional - The value of the big binary bound. Defaults to the value of big M minus the offset to big M. - Use either this or the offset! - """ - if big_binary_bound is not None and offset_to_big_m is not None: - raise ValueError(f'Either use "offset_to_big_m" or set the "big_binary_bound" directly. Not Both') - if big_m is not None: - cls.BIG_M = big_m - if epsilon is not None: - cls.EPSILON = epsilon - if big_binary_bound is not None: - cls.BIG_BINARY_BOUND = big_binary_bound - if offset_to_big_m is not None: - cls.BIG_BINARY_BOUND = cls.BIG_M / offset_to_big_m - class TimeSeriesData: # TODO: Move to Interface.py @@ -124,6 +71,9 @@ def __str__(self): return str(self.data) +Numeric_TS = Union[Skalar, np.ndarray, TimeSeriesData] # TODO: This is not really correct throughout the codebase. Sometimes it's used for TimeSeries as well? + + class TimeSeries: """ Class for data that applies to time series, stored as vector (np.ndarray) or scalar. 
@@ -320,113 +270,3 @@ def as_effect_dict_with_ts(name_of_param: str, effect_dict = as_effect_dict(effect_values) effect_ts_dict = effect_values_to_ts(name_of_param, effect_dict, owner) return effect_ts_dict - - -class MultilineFormater(logging.Formatter): - - def format(self, record): - message_lines = record.getMessage().split('\n') - - # Prepare the log prefix (timestamp + log level) - timestamp = self.formatTime(record, self.datefmt) - log_level = record.levelname.ljust(8) # Align log levels for consistency - log_prefix = f"{timestamp} | {log_level} |" - - # Format all lines - first_line = [f'{log_prefix} {message_lines[0]}'] - if len(message_lines) > 1: - lines = first_line + [f"{log_prefix} {line}" for line in message_lines[1:]] - else: - lines = first_line - - return '\n'.join(lines) - - -class ColoredMultilineFormater(MultilineFormater): - # ANSI escape codes for colors - COLORS = { - 'DEBUG': '\033[32m', # Green - 'INFO': '\033[34m', # Blue - 'WARNING': '\033[33m', # Yellow - 'ERROR': '\033[31m', # Red - 'CRITICAL': '\033[1m\033[31m', # Bold Red - } - RESET = '\033[0m' - - def format(self, record): - lines = super().format(record).splitlines() - log_color = self.COLORS.get(record.levelname, self.RESET) - - # Create a formatted message for each line separately - formatted_lines = [] - for line in lines: - formatted_lines.append(f"{log_color}{line}{self.RESET}") - - return '\n'.join(formatted_lines) - - -def _get_logging_handler(log_file: Optional[str] = None, - use_rich_handler: bool = False) -> logging.Handler: - """Returns a logging handler for the given log file.""" - if use_rich_handler and log_file is None: - # RichHandler for console output - console = Console(width=120) - rich_handler = RichHandler( - console=console, - rich_tracebacks=True, - omit_repeated_times=True, - show_path=False, - log_time_format="%Y-%m-%d %H:%M:%S", - ) - rich_handler.setFormatter(logging.Formatter("%(message)s")) # Simplified formatting - - return rich_handler - elif 
log_file is None: - # Regular Logger with custom formating enabled - file_handler = logging.StreamHandler() - file_handler.setFormatter(ColoredMultilineFormater( - fmt="%(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - )) - return file_handler - else: - # FileHandler for file output - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter(MultilineFormater( - fmt="%(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - )) - return file_handler - -def setup_logging(default_level: Literal['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'] = 'INFO', - log_file: Optional[str] = 'flixOpt.log', - use_rich_handler: bool = False): - """Setup logging configuration""" - logger = logging.getLogger('flixOpt') # Use a specific logger name for your package - logger.setLevel(get_logging_level_by_name(default_level)) - # Clear existing handlers - if logger.hasHandlers(): - logger.handlers.clear() - - logger.addHandler(_get_logging_handler(use_rich_handler=use_rich_handler)) - if log_file is not None: - logger.addHandler(_get_logging_handler(log_file, use_rich_handler=False)) - - return logger - - -def get_logging_level_by_name(level_name: Literal['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']) -> int: - possible_logging_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'] - if level_name.upper() not in possible_logging_levels: - raise ValueError(f'Invalid logging level {level_name}') - else: - logging_level = getattr(logging, level_name.upper(), logging.WARNING) - return logging_level - - -def change_logging_level(level_name: Literal['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']): - logger = logging.getLogger('flixOpt') - logging_level = get_logging_level_by_name(level_name) - logger.setLevel(logging_level) - for handler in logger.handlers: - handler.setLevel(logging_level) diff --git a/flixOpt/elements.py b/flixOpt/elements.py index bc77fafd2..69c2aa33a 100644 --- a/flixOpt/elements.py +++ b/flixOpt/elements.py @@ -8,7 +8,8 @@ import numpy as np from .math_modeling 
import Variable, VariableTS -from .core import Numeric, Numeric_TS, Skalar, Config +from .core import Numeric, Numeric_TS, Skalar +from .config import CONFIG from .interface import InvestParameters, OnOffParameters from .features import OnOffModel, InvestmentModel, PreventSimultaneousUsageModel from .structure import SystemModel, Element, ElementModel, _create_time_series, create_equation, create_variable, \ @@ -202,7 +203,7 @@ def __init__(self, previous flow rate of the component. """ super().__init__(label, meta_data=meta_data) - self.size = size or Config.BIG_M # Default size + self.size = size or CONFIG.modeling.BIG # Default size self.relative_minimum = relative_minimum self.relative_maximum = relative_maximum self.fixed_relative_profile = fixed_relative_profile @@ -246,7 +247,7 @@ def _plausibility_checks(self) -> None: if np.any(self.relative_minimum > self.relative_maximum): raise Exception(self.label_full + ': Take care, that relative_minimum <= relative_maximum!') - if self.size == Config.BIG_M and self.fixed_relative_profile is not None: # Default Size --> Most likely by accident + if self.size == CONFIG.modeling.BIG and self.fixed_relative_profile is not None: # Default Size --> Most likely by accident raise Exception('Achtung: Wenn fixed_relative_profile genutzt wird, muss zugehöriges size definiert werden, ' 'da: value = fixed_relative_profile * size!') diff --git a/flixOpt/features.py b/flixOpt/features.py index 2e98369c2..2c13ea082 100644 --- a/flixOpt/features.py +++ b/flixOpt/features.py @@ -9,7 +9,8 @@ import numpy as np from .math_modeling import Variable, VariableTS, Equation -from .core import TimeSeries, Skalar, Numeric, Config +from .core import TimeSeries, Skalar, Numeric +from .config import CONFIG from .interface import InvestParameters, OnOffParameters from .structure import ElementModel, SystemModel, Element, create_equation, create_variable @@ -119,7 +120,7 @@ def _create_bounds_for_optional_investment(self, system_model: 
SystemModel): # eq2: P_invest >= isInvested * max(epsilon, investSize_min) eq_is_invested_lb = create_equation('is_invested_lb', self, 'ineq') eq_is_invested_lb.add_summand(self.size, -1) - eq_is_invested_lb.add_summand(self.is_invested, np.maximum(Config.EPSILON, self._invest_parameters.minimum_size)) + eq_is_invested_lb.add_summand(self.is_invested, np.maximum(CONFIG.modeling.EPSILON, self._invest_parameters.minimum_size)) def _create_bounds_for_defining_variable(self, system_model: SystemModel): label = self._defining_variable.label @@ -191,7 +192,7 @@ def __init__(self, def do_modeling(self, system_model: SystemModel): self.on = create_variable('on', self, system_model.nr_of_time_steps, is_binary=True, - previous_values=self._previous_on_values(Config.EPSILON)) + previous_values=self._previous_on_values(CONFIG.modeling.EPSILON)) self.total_on_hours = create_variable('totalOnHours', self, 1, lower_bound=self._on_off_parameters.on_hours_total_min, @@ -204,7 +205,7 @@ def do_modeling(self, system_model: SystemModel): if self._on_off_parameters.use_off: self.off = create_variable('off', self, system_model.nr_of_time_steps, is_binary=True, - previous_values=1 - self._previous_on_values(Config.EPSILON)) + previous_values=1 - self._previous_on_values(CONFIG.modeling.EPSILON)) self._add_off_constraints(system_model, system_model.indices) @@ -248,7 +249,7 @@ def _add_on_constraints(self, system_model: SystemModel, time_indices: Union[lis #### Bedingung 1) #### # eq: On(t) * max(epsilon, lower_bound) <= Q_th(t) eq_on_1.add_summand(variable, -1, time_indices) - eq_on_1.add_summand(self.on, np.maximum(Config.EPSILON, lower_bound), time_indices) + eq_on_1.add_summand(self.on, np.maximum(CONFIG.modeling.EPSILON, lower_bound), time_indices) #### Bedingung 2) #### # eq: Q_th(t) <= Q_th_max * On(t) @@ -261,7 +262,7 @@ def _add_on_constraints(self, system_model: SystemModel, time_indices: Union[lis # eq: - sum(alle Leistungen(t)) + Epsilon * On(t) <= 0 for variable in 
self._defining_variables: eq_on_1.add_summand(variable, -1, time_indices) - eq_on_1.add_summand(self.on, Config.EPSILON, time_indices) + eq_on_1.add_summand(self.on, CONFIG.modeling.EPSILON, time_indices) #### Bedingung 2) #### ## sum(alle Leistung) >0 -> On = 1 | On=0 -> sum(Leistung)=0 @@ -276,7 +277,7 @@ def _add_on_constraints(self, system_model: SystemModel, time_indices: Union[lis upper_bound = absolute_maximum / nr_of_defining_variables eq_on_2.add_summand(self.on, -1 * upper_bound, time_indices) - if np.max(upper_bound) > Config.BIG_BINARY_BOUND: + if np.max(upper_bound) > CONFIG.modeling.BIG_BINARY_BOUND: logger.warning( f'In "{self.element.label_full}", a binary definition was created with a big upper bound ' f'({np.max(upper_bound)}). This can lead to wrong results regarding the on and off variables. ' diff --git a/flixOpt/interface.py b/flixOpt/interface.py index d183ae647..09f343032 100644 --- a/flixOpt/interface.py +++ b/flixOpt/interface.py @@ -6,7 +6,8 @@ import logging from typing import Union, Optional, Dict, List, Tuple, TYPE_CHECKING -from .core import Numeric, Skalar, Numeric_TS, Config +from .core import Numeric, Skalar, Numeric_TS +from .config import CONFIG from .structure import get_object_infos_as_str, get_object_infos_as_dict if TYPE_CHECKING: from .structure import Element @@ -71,7 +72,7 @@ def __init__(self, self.specific_effects: EffectValuesInvest = specific_effects self.effects_in_segments = effects_in_segments self._minimum_size = minimum_size - self._maximum_size = maximum_size or Config.BIG_M # default maximum + self._maximum_size = maximum_size or CONFIG.modeling.BIG # default maximum def transform_data(self): from .effects import as_effect_dict diff --git a/flixOpt/structure.py b/flixOpt/structure.py index 1cd7f5cd8..51f950b15 100644 --- a/flixOpt/structure.py +++ b/flixOpt/structure.py @@ -13,6 +13,7 @@ from . 
import utils from .math_modeling import MathModel, Variable, Equation, Inequation, VariableTS, Solver from .core import TimeSeries, Skalar, Numeric, Numeric_TS, TimeSeriesData +from .config import CONFIG if TYPE_CHECKING: # for type checking and preventing circular imports from .flow_system import FlowSystem @@ -166,6 +167,7 @@ def infos(self) -> Dict: infos['Constraints'] = self.description_of_constraints() infos['Variables'] = self.description_of_variables() infos['Main Results'] = self.main_results + infos['Config'] = CONFIG.to_dict() return infos @property diff --git a/pyproject.toml b/pyproject.toml index 17048ba56..279732d37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,3 +58,6 @@ repository = "https://github.com/flixOpt/flixOpt" [tool.setuptools.packages.find] where = ["."] exclude = ["tests", "docs", "examples", "examples.*", "Tutorials", ".git", ".vscode", "build", ".venv", "venv/"] + +[tool.setuptools.package-data] +"flixOpt" = ["config.yaml"] \ No newline at end of file diff --git a/tests/test_integration.py b/tests/test_integration.py index 76cf4a88a..73a6aabcb 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -17,7 +17,7 @@ class BaseTest(unittest.TestCase): def setUp(self): - setup_logging("DEBUG") + change_logging_level("DEBUG") def get_solver(self): return solvers.HighsSolver(mip_gap=0.0001, time_limit_seconds=3600, solver_output_to_console=False)