From 9416b243da149b2b381043e06d74f609e16b9857 Mon Sep 17 00:00:00 2001 From: taimo Date: Tue, 10 Dec 2024 23:57:45 +0900 Subject: [PATCH 01/15] add stacking model & config --- src/pytorch_tabular/models/__init__.py | 5 + .../models/stacking/__init__.py | 5 + src/pytorch_tabular/models/stacking/config.py | 27 +++ .../models/stacking/stacking_model.py | 162 +++++++++++++ src/pytorch_tabular/models/tabnet/config.py | 1 + tests/test_model_stacking.py | 227 ++++++++++++++++++ 6 files changed, 427 insertions(+) create mode 100644 src/pytorch_tabular/models/stacking/__init__.py create mode 100644 src/pytorch_tabular/models/stacking/config.py create mode 100644 src/pytorch_tabular/models/stacking/stacking_model.py create mode 100644 tests/test_model_stacking.py diff --git a/src/pytorch_tabular/models/__init__.py b/src/pytorch_tabular/models/__init__.py index 0ae80b8e..91cb8f54 100644 --- a/src/pytorch_tabular/models/__init__.py +++ b/src/pytorch_tabular/models/__init__.py @@ -21,6 +21,7 @@ from .node import NodeConfig, NODEModel from .tab_transformer import TabTransformerConfig, TabTransformerModel from .tabnet import TabNetModel, TabNetModelConfig +from .stacking import StackingModel, StackingModelConfig __all__ = [ "CategoryEmbeddingModel", @@ -45,6 +46,8 @@ "GANDALFBackbone", "DANetConfig", "DANetModel", + "StackingModel", + "StackingModelConfig", "category_embedding", "node", "mixture_density", @@ -55,4 +58,6 @@ "gate", "gandalf", "danet", + "stacking", ] + diff --git a/src/pytorch_tabular/models/stacking/__init__.py b/src/pytorch_tabular/models/stacking/__init__.py new file mode 100644 index 00000000..1c60a59e --- /dev/null +++ b/src/pytorch_tabular/models/stacking/__init__.py @@ -0,0 +1,5 @@ +from .config import StackingModelConfig +from .stacking_model import StackingModel, StackingBackbone + + +__all__ = ["StackingModel", "StackingModelConfig", "StackingBackbone"] diff --git a/src/pytorch_tabular/models/stacking/config.py b/src/pytorch_tabular/models/stacking/config.py new file mode 100644 index 00000000..b4b46d48 --- /dev/null +++ b/src/pytorch_tabular/models/stacking/config.py @@ -0,0 +1,27 @@ +from dataclasses import dataclass, field +from pytorch_tabular.config import ModelConfig +from pytorch_tabular.models.category_embedding.config import CategoryEmbeddingModelConfig + + +@dataclass +class StackingModelConfig(ModelConfig): + """ + StackingModelConfig is a configuration class for the StackingModel. + It is used to stack multiple models together. + Now, CategoryEmbeddingModel, TabNetModel, FTTransformerModel, GatedAdditiveTreeEnsembleModel, DANetModel, AutoIntModel, GANDALFModel, NodeModel are supported. + + Args: + model_configs (list[ModelConfig]): List of model configs to stack. 
+ """ + model_configs: list[ModelConfig] = field( + default_factory=list, metadata={"help": "List of model configs to stack"} + ) + _module_src: str = field(default="models.stacking") + _model_name: str = field(default="StackingModel") + _backbone_name: str = field(default="StackingBackbone") + _config_name: str = field(default="StackingConfig") + + +#if __name__ == "__main__": +# from pytorch_tabular.utils import generate_doc_dataclass +# print(generate_doc_dataclass(StackingModelConfig)) diff --git a/src/pytorch_tabular/models/stacking/stacking_model.py b/src/pytorch_tabular/models/stacking/stacking_model.py new file mode 100644 index 00000000..b1c77243 --- /dev/null +++ b/src/pytorch_tabular/models/stacking/stacking_model.py @@ -0,0 +1,162 @@ +import torch +import torch.nn as nn +from dataclasses import fields +from omegaconf import DictConfig +from pytorch_tabular.models import BaseModel +from pytorch_tabular.models.common.heads import blocks +import inspect + +from pytorch_tabular.config import ModelConfig +from pytorch_tabular.models.category_embedding import CategoryEmbeddingBackbone, CategoryEmbeddingModelConfig, CategoryEmbeddingModel +from pytorch_tabular.models.ft_transformer import FTTransformerBackbone, FTTransformerConfig +from pytorch_tabular.models.node import NODEBackbone, NodeConfig +from pytorch_tabular.models.tab_transformer import TabTransformerBackbone +from pytorch_tabular.models.gate import GatedAdditiveTreesBackbone +from pytorch_tabular.models.tabnet import TabNetBackbone +from pytorch_tabular.models.danet import DANetBackbone +from pytorch_tabular.models.gandalf import GANDALFBackbone +from pytorch_tabular.models.autoint import AutoIntBackbone + + +def instatiate_backbone(hparams, backbone_name): + backbone_class = eval(backbone_name) + class_args = list(inspect.signature(backbone_class).parameters.keys()) + if "config" in class_args: + return backbone_class( + config=hparams + ) + else: + return backbone_class(**{ + arg: getattr(hparams, arg) + if arg != "block_activation" else getattr(nn, getattr(hparams, arg))() + for arg in class_args + }) + +class StackingBackbone(nn.Module): + def __init__(self, config: DictConfig): + super().__init__() + self.hparams = config + self._build_network() + + def _build_network(self): + self._backbones = nn.ModuleList() + self._heads = nn.ModuleList() + self._backbone_output_dims = [] + assert ( + len(self.hparams.model_configs) > 0 + ), "Stacking requires more than 0 model" + for model_i in range(len(self.hparams.model_configs)): + # move necessary params to each model config + self.hparams.model_configs[ + model_i + ].embedded_cat_dim = self.hparams.embedded_cat_dim + self.hparams.model_configs[ + model_i + ].continuous_dim = self.hparams.continuous_dim + self.hparams.model_configs[ + model_i + ].n_continuous_features = self.hparams.continuous_dim + + self.hparams.model_configs[ + model_i + ].embedding_dims = self.hparams.embedding_dims + self.hparams.model_configs[ + model_i + ].categorical_cardinality = self.hparams.categorical_cardinality + self.hparams.model_configs[ + model_i + ].categorical_dim = self.hparams.categorical_dim + self.hparams.model_configs[ + model_i + ].cat_embedding_dims = self.hparams.embedding_dims + + # if output_dim is not set, set it to 128 + if getattr(self.hparams.model_configs[model_i], "output_dim", None) is None: + self.hparams.model_configs[model_i].output_dim = 128 + + # if inferred_config is not set, set it to None. 
+ if getattr(self.hparams, "inferred_config", None) is not None: + self.hparams.model_configs[ + model_i + ].inferred_config = self.hparams.inferred_config + + # instantiate backbone + _backbone = instatiate_backbone( + self.hparams.model_configs[model_i], + self.hparams.model_configs[model_i]._backbone_name + ) + # set continuous_dim + _backbone.continuous_dim = self.hparams.continuous_dim + # if output_dim is not set, set it to the output_dim in model_config + if getattr(_backbone, "output_dim", None) is None: + setattr( + _backbone, + "output_dim", + self.hparams.model_configs[model_i].output_dim, + ) + self._backbones.append(_backbone) + self._backbone_output_dims.append(_backbone.output_dim) + + self.output_dim = sum(self._backbone_output_dims) + + def _build_embedding_layer(self): + assert getattr(self, "_backbones", None) is not None, "Backbones are not built" + embedding_layers = nn.ModuleList() + for backbone in self._backbones: + if getattr(backbone, "_build_embedding_layer", None) is None: + embedding_layers.append(nn.Identity()) + else: + embedding_layers.append(backbone._build_embedding_layer()) + return embedding_layers + + def forward(self, x_list: list[torch.Tensor]): + outputs = [] + for i, backbone in enumerate(self._backbones): + bb_output = backbone(x_list[i]) + if len(bb_output.shape) == 3 and isinstance(backbone, GatedAdditiveTreesBackbone): + bb_output = bb_output.mean(dim=-1) + elif len(bb_output.shape) == 3 and isinstance(backbone, NODEBackbone): + bb_output = bb_output.mean(dim=1) + outputs.append(bb_output) + x = torch.cat(outputs, dim=1) + return x + + +class StackingModel(BaseModel): + def __init__(self, config: DictConfig, **kwargs): + super().__init__(config, **kwargs) + + def _build_network(self): + self._backbone = StackingBackbone(self.hparams) + self._embedding_layer = self._backbone._build_embedding_layer() + self.output_dim = self._backbone.output_dim + self._head = self._get_head_from_config() + + def _get_head_from_config(self): + _head_callable = getattr(blocks, self.hparams.head) + return _head_callable( + in_units=self.output_dim, + output_dim=self.hparams.output_dim, + config=_head_callable._config_template(**self.hparams.head_config), + ) + + @property + def backbone(self): + return self._backbone + + @property + def embedding_layer(self): + return self._embedding_layer + + @property + def head(self): + return self._head + + def forward(self, x: dict[str, torch.Tensor]): + outputs = [] + for embedding_layer in self._embedding_layer: + em_output = embedding_layer(x) + outputs.append(em_output) + outputs = self._backbone(outputs) + x = self._head(outputs) + return {"logits": x} diff --git a/src/pytorch_tabular/models/tabnet/config.py b/src/pytorch_tabular/models/tabnet/config.py index ade0c6a0..c1142273 100644 --- a/src/pytorch_tabular/models/tabnet/config.py +++ b/src/pytorch_tabular/models/tabnet/config.py @@ -129,6 +129,7 @@ class TabNetModelConfig(ModelConfig): _module_src: str = field(default="models.tabnet") _model_name: str = field(default="TabNetModel") _config_name: str = field(default="TabNetModelConfig") + _backbone_name: str = field(default="TabNetBackbone") # if __name__ == "__main__": diff --git a/tests/test_model_stacking.py b/tests/test_model_stacking.py new file mode 100644 index 00000000..2030786f --- /dev/null +++ b/tests/test_model_stacking.py @@ -0,0 +1,227 @@ +import pytest +import numpy as np +from sklearn.preprocessing import PowerTransformer +import torch + +from pytorch_tabular import TabularModel +from 
pytorch_tabular.models.stacking import StackingModelConfig +from pytorch_tabular.models.category_embedding import CategoryEmbeddingModelConfig +from pytorch_tabular.models.tabnet import TabNetModelConfig +from pytorch_tabular.models.ft_transformer import FTTransformerConfig +from pytorch_tabular.models.gate import GatedAdditiveTreeEnsembleConfig +from pytorch_tabular.models.danet import DANetConfig +from pytorch_tabular.models.node import NodeConfig +from pytorch_tabular.models.gandalf import GANDALFConfig +from pytorch_tabular.models.autoint import AutoIntConfig +from pytorch_tabular.categorical_encoders import CategoricalEmbeddingTransformer +from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig + +def fake_metric(y_hat, y): + return (y_hat - y).mean() + +def get_model_configs(task): + all_model_configs = [ + lambda task: CategoryEmbeddingModelConfig( + task=task, + ), + lambda task: TabNetModelConfig( + task=task, + ), + lambda task: FTTransformerConfig( + task=task, + ), + lambda task: GatedAdditiveTreeEnsembleConfig( + task=task, + ), + lambda task: DANetConfig( + task=task, + ), + lambda task: AutoIntConfig( + task=task, + ), + lambda task: GANDALFConfig( + task=task, + ), + lambda task: GatedAdditiveTreeEnsembleConfig( + task=task, + ), + lambda task: NodeConfig( + task=task, + ), + ] + return [model_config(task) for model_config in all_model_configs] + + +@pytest.mark.parametrize("multi_target", [True, False]) +@pytest.mark.parametrize( + "continuous_cols", + [ + [ + "AveRooms", + "AveBedrms", + "Population", + "AveOccup", + "Latitude", + "Longitude", + ], + [], + ], +) +@pytest.mark.parametrize("categorical_cols", [["HouseAgeBin"], []]) +@pytest.mark.parametrize("continuous_feature_transform", [None, "yeo-johnson"]) +@pytest.mark.parametrize("normalize_continuous_features", [True, False]) +@pytest.mark.parametrize("target_range", [True, False]) +@pytest.mark.parametrize( + "target_transform", + [None, PowerTransformer(), (lambda x: np.power(x, 2), lambda x: np.sqrt(x))], +) +@pytest.mark.parametrize("virtual_bz", [None, 32]) +# @pytest.mark.parametrize("custom_loss", [None, torch.nn.L1Loss()]) +# @pytest.mark.parametrize("custom_optimizer", [None, torch.optim.Adagrad]) +@pytest.mark.parametrize( + "custom_args", [(None, None, None, None), ([fake_metric], [False], torch.nn.L1Loss(), torch.optim.Adagrad)] +) +@pytest.mark.parametrize("custom_head_config", [None, "", "32", "32-32"]) +@pytest.mark.parametrize("model_configs", [get_model_configs("regression")]) +def test_regression( + regression_data, + multi_target, + continuous_cols, + categorical_cols, + continuous_feature_transform, + normalize_continuous_features, + target_range, + target_transform, + virtual_bz, + # custom_metrics, + # custom_loss, + # custom_optimizer, + custom_args, + custom_head_config, + model_configs, +): + (train, test, target) = regression_data + (custom_metrics, custom_metrics_prob_input, custom_loss, custom_optimizer) = custom_args + if len(continuous_cols) + len(categorical_cols) == 0: + return + + data_config = DataConfig( + target=target + ["MedInc"] if multi_target else target, + continuous_cols=continuous_cols, + categorical_cols=categorical_cols, + continuous_feature_transform=continuous_feature_transform, + normalize_continuous_features=normalize_continuous_features, + ) + model_config_params = {"task": "regression", "virtual_batch_size": virtual_bz} + + if target_range: + _target_range = [] + for target in data_config.target: + _target_range.append( + ( + 
float(train[target].min()), + float(train[target].max()), + ) + ) + model_config_params["target_range"] = _target_range + if custom_head_config is not None: + model_config_params["head"] = "LinearHead" + model_config_params["head_config"] = {"layers": custom_head_config} + + model_config_params["model_configs"] = model_configs + model_config = StackingModelConfig(**model_config_params) + trainer_config = TrainerConfig( + max_epochs=3, + checkpoints=None, + early_stopping=None, + accelerator="cpu", + fast_dev_run=True, + ) + optimizer_config = OptimizerConfig() + + tabular_model = TabularModel( + data_config=data_config, + model_config=model_config, + optimizer_config=optimizer_config, + trainer_config=trainer_config, + ) + tabular_model.fit( + train=train, + metrics=custom_metrics, + metrics_prob_inputs=custom_metrics_prob_input, + target_transform=target_transform, + loss=custom_loss, + optimizer=custom_optimizer, + optimizer_params={}, + ) + + result = tabular_model.evaluate(test) + # print(result[0]["valid_loss"]) + if custom_metrics is None: + assert "test_mean_squared_error" in result[0].keys() + else: + assert "test_fake_metric" in result[0].keys() + pred_df = tabular_model.predict(test) + assert pred_df.shape[0] == test.shape[0] + + +@pytest.mark.parametrize("multi_target", [False, True]) +@pytest.mark.parametrize( + "continuous_cols", + [ + [f"feature_{i}" for i in range(54)], + [], + ], +) +@pytest.mark.parametrize("categorical_cols", [["feature_0_cat"], []]) +@pytest.mark.parametrize("continuous_feature_transform", [None]) +@pytest.mark.parametrize("normalize_continuous_features", [True]) +@pytest.mark.parametrize("model_configs", [get_model_configs("classification")]) +def test_classification( + classification_data, + multi_target, + continuous_cols, + categorical_cols, + continuous_feature_transform, + normalize_continuous_features, + model_configs, +): + (train, test, target) = classification_data + if len(continuous_cols) + len(categorical_cols) == 0: + return + + data_config = DataConfig( + target=target + ["feature_53"] if multi_target else target, + continuous_cols=continuous_cols, + categorical_cols=categorical_cols, + continuous_feature_transform=continuous_feature_transform, + normalize_continuous_features=normalize_continuous_features, + ) + model_config_params = {"task": "classification"} + + model_config_params["model_configs"] = model_configs + model_config = StackingModelConfig(**model_config_params) + trainer_config = TrainerConfig( + max_epochs=3, + checkpoints=None, + early_stopping=None, + accelerator="cpu", + fast_dev_run=True, + ) + optimizer_config = OptimizerConfig() + + tabular_model = TabularModel( + data_config=data_config, + model_config=model_config, + optimizer_config=optimizer_config, + trainer_config=trainer_config, + ) + tabular_model.fit(train=train) + + result = tabular_model.evaluate(test) + # print(result[0]["valid_loss"]) + assert "test_accuracy" in result[0].keys() + pred_df = tabular_model.predict(test) + assert pred_df.shape[0] == test.shape[0] + + From 3d616e28952d831a88a60b17f8267fcd69872421 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 15:05:26 +0000 Subject: [PATCH 02/15] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/pytorch_tabular/models/__init__.py | 3 +- .../models/stacking/__init__.py | 3 +- src/pytorch_tabular/models/stacking/config.py | 17 ++--- .../models/stacking/stacking_model.py | 
73 ++++++------------- tests/test_model_stacking.py | 21 +++--- 5 files changed, 43 insertions(+), 74 deletions(-) diff --git a/src/pytorch_tabular/models/__init__.py b/src/pytorch_tabular/models/__init__.py index 91cb8f54..e4d3353b 100644 --- a/src/pytorch_tabular/models/__init__.py +++ b/src/pytorch_tabular/models/__init__.py @@ -19,9 +19,9 @@ from .gate import GatedAdditiveTreeEnsembleConfig, GatedAdditiveTreeEnsembleModel from .mixture_density import MDNConfig, MDNModel from .node import NodeConfig, NODEModel +from .stacking import StackingModel, StackingModelConfig from .tab_transformer import TabTransformerConfig, TabTransformerModel from .tabnet import TabNetModel, TabNetModelConfig -from .stacking import StackingModel, StackingModelConfig __all__ = [ "CategoryEmbeddingModel", @@ -60,4 +60,3 @@ "danet", "stacking", ] - diff --git a/src/pytorch_tabular/models/stacking/__init__.py b/src/pytorch_tabular/models/stacking/__init__.py index 1c60a59e..ca69b8ae 100644 --- a/src/pytorch_tabular/models/stacking/__init__.py +++ b/src/pytorch_tabular/models/stacking/__init__.py @@ -1,5 +1,4 @@ from .config import StackingModelConfig -from .stacking_model import StackingModel, StackingBackbone - +from .stacking_model import StackingBackbone, StackingModel __all__ = ["StackingModel", "StackingModelConfig", "StackingBackbone"] diff --git a/src/pytorch_tabular/models/stacking/config.py b/src/pytorch_tabular/models/stacking/config.py index b4b46d48..23be156e 100644 --- a/src/pytorch_tabular/models/stacking/config.py +++ b/src/pytorch_tabular/models/stacking/config.py @@ -1,27 +1,26 @@ from dataclasses import dataclass, field + from pytorch_tabular.config import ModelConfig -from pytorch_tabular.models.category_embedding.config import CategoryEmbeddingModelConfig @dataclass class StackingModelConfig(ModelConfig): - """ - StackingModelConfig is a configuration class for the StackingModel. - It is used to stack multiple models together. - Now, CategoryEmbeddingModel, TabNetModel, FTTransformerModel, GatedAdditiveTreeEnsembleModel, DANetModel, AutoIntModel, GANDALFModel, NodeModel are supported. + """StackingModelConfig is a configuration class for the StackingModel. It is used to stack multiple models + together. Now, CategoryEmbeddingModel, TabNetModel, FTTransformerModel, GatedAdditiveTreeEnsembleModel, DANetModel, + AutoIntModel, GANDALFModel, NodeModel are supported. Args: model_configs (list[ModelConfig]): List of model configs to stack. 
+ """ - model_configs: list[ModelConfig] = field( - default_factory=list, metadata={"help": "List of model configs to stack"} - ) + + model_configs: list[ModelConfig] = field(default_factory=list, metadata={"help": "List of model configs to stack"}) _module_src: str = field(default="models.stacking") _model_name: str = field(default="StackingModel") _backbone_name: str = field(default="StackingBackbone") _config_name: str = field(default="StackingConfig") -#if __name__ == "__main__": +# if __name__ == "__main__": # from pytorch_tabular.utils import generate_doc_dataclass # print(generate_doc_dataclass(StackingModelConfig)) diff --git a/src/pytorch_tabular/models/stacking/stacking_model.py b/src/pytorch_tabular/models/stacking/stacking_model.py index b1c77243..8c97f037 100644 --- a/src/pytorch_tabular/models/stacking/stacking_model.py +++ b/src/pytorch_tabular/models/stacking/stacking_model.py @@ -1,36 +1,28 @@ +import inspect + import torch import torch.nn as nn -from dataclasses import fields from omegaconf import DictConfig + from pytorch_tabular.models import BaseModel from pytorch_tabular.models.common.heads import blocks -import inspect - -from pytorch_tabular.config import ModelConfig -from pytorch_tabular.models.category_embedding import CategoryEmbeddingBackbone, CategoryEmbeddingModelConfig, CategoryEmbeddingModel -from pytorch_tabular.models.ft_transformer import FTTransformerBackbone, FTTransformerConfig -from pytorch_tabular.models.node import NODEBackbone, NodeConfig -from pytorch_tabular.models.tab_transformer import TabTransformerBackbone from pytorch_tabular.models.gate import GatedAdditiveTreesBackbone -from pytorch_tabular.models.tabnet import TabNetBackbone -from pytorch_tabular.models.danet import DANetBackbone -from pytorch_tabular.models.gandalf import GANDALFBackbone -from pytorch_tabular.models.autoint import AutoIntBackbone +from pytorch_tabular.models.node import NODEBackbone def instatiate_backbone(hparams, backbone_name): backbone_class = eval(backbone_name) class_args = list(inspect.signature(backbone_class).parameters.keys()) if "config" in class_args: + return backbone_class(config=hparams) + else: return backbone_class( - config=hparams + **{ + arg: getattr(hparams, arg) if arg != "block_activation" else getattr(nn, getattr(hparams, arg))() + for arg in class_args + } ) - else: - return backbone_class(**{ - arg: getattr(hparams, arg) - if arg != "block_activation" else getattr(nn, getattr(hparams, arg))() - for arg in class_args - }) + class StackingBackbone(nn.Module): def __init__(self, config: DictConfig): @@ -42,48 +34,29 @@ def _build_network(self): self._backbones = nn.ModuleList() self._heads = nn.ModuleList() self._backbone_output_dims = [] - assert ( - len(self.hparams.model_configs) > 0 - ), "Stacking requires more than 0 model" + assert len(self.hparams.model_configs) > 0, "Stacking requires more than 0 model" for model_i in range(len(self.hparams.model_configs)): # move necessary params to each model config - self.hparams.model_configs[ - model_i - ].embedded_cat_dim = self.hparams.embedded_cat_dim - self.hparams.model_configs[ - model_i - ].continuous_dim = self.hparams.continuous_dim - self.hparams.model_configs[ - model_i - ].n_continuous_features = self.hparams.continuous_dim - - self.hparams.model_configs[ - model_i - ].embedding_dims = self.hparams.embedding_dims - self.hparams.model_configs[ - model_i - ].categorical_cardinality = self.hparams.categorical_cardinality - self.hparams.model_configs[ - model_i - ].categorical_dim = 
self.hparams.categorical_dim - self.hparams.model_configs[ - model_i - ].cat_embedding_dims = self.hparams.embedding_dims - + self.hparams.model_configs[model_i].embedded_cat_dim = self.hparams.embedded_cat_dim + self.hparams.model_configs[model_i].continuous_dim = self.hparams.continuous_dim + self.hparams.model_configs[model_i].n_continuous_features = self.hparams.continuous_dim + + self.hparams.model_configs[model_i].embedding_dims = self.hparams.embedding_dims + self.hparams.model_configs[model_i].categorical_cardinality = self.hparams.categorical_cardinality + self.hparams.model_configs[model_i].categorical_dim = self.hparams.categorical_dim + self.hparams.model_configs[model_i].cat_embedding_dims = self.hparams.embedding_dims + # if output_dim is not set, set it to 128 if getattr(self.hparams.model_configs[model_i], "output_dim", None) is None: self.hparams.model_configs[model_i].output_dim = 128 # if inferred_config is not set, set it to None. if getattr(self.hparams, "inferred_config", None) is not None: - self.hparams.model_configs[ - model_i - ].inferred_config = self.hparams.inferred_config + self.hparams.model_configs[model_i].inferred_config = self.hparams.inferred_config # instantiate backbone _backbone = instatiate_backbone( - self.hparams.model_configs[model_i], - self.hparams.model_configs[model_i]._backbone_name + self.hparams.model_configs[model_i], self.hparams.model_configs[model_i]._backbone_name ) # set continuous_dim _backbone.continuous_dim = self.hparams.continuous_dim diff --git a/tests/test_model_stacking.py b/tests/test_model_stacking.py index 2030786f..d5ea9709 100644 --- a/tests/test_model_stacking.py +++ b/tests/test_model_stacking.py @@ -1,24 +1,25 @@ -import pytest import numpy as np -from sklearn.preprocessing import PowerTransformer +import pytest import torch +from sklearn.preprocessing import PowerTransformer from pytorch_tabular import TabularModel -from pytorch_tabular.models.stacking import StackingModelConfig +from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig +from pytorch_tabular.models.autoint import AutoIntConfig from pytorch_tabular.models.category_embedding import CategoryEmbeddingModelConfig -from pytorch_tabular.models.tabnet import TabNetModelConfig +from pytorch_tabular.models.danet import DANetConfig from pytorch_tabular.models.ft_transformer import FTTransformerConfig +from pytorch_tabular.models.gandalf import GANDALFConfig from pytorch_tabular.models.gate import GatedAdditiveTreeEnsembleConfig -from pytorch_tabular.models.danet import DANetConfig from pytorch_tabular.models.node import NodeConfig -from pytorch_tabular.models.gandalf import GANDALFConfig -from pytorch_tabular.models.autoint import AutoIntConfig -from pytorch_tabular.categorical_encoders import CategoricalEmbeddingTransformer -from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig +from pytorch_tabular.models.stacking import StackingModelConfig +from pytorch_tabular.models.tabnet import TabNetModelConfig + def fake_metric(y_hat, y): return (y_hat - y).mean() + def get_model_configs(task): all_model_configs = [ lambda task: CategoryEmbeddingModelConfig( @@ -223,5 +224,3 @@ def test_classification( assert "test_accuracy" in result[0].keys() pred_df = tabular_model.predict(test) assert pred_df.shape[0] == test.shape[0] - - From 43f80d10f60fd4dc8d30e4674c1116ac8e36aebe Mon Sep 17 00:00:00 2001 From: taimo Date: Wed, 11 Dec 2024 22:10:55 +0900 Subject: [PATCH 03/15] refactor: Add StackingEmbeddingLayer to delete "forward" 
from StackingModel --- .../models/stacking/stacking_model.py | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/pytorch_tabular/models/stacking/stacking_model.py b/src/pytorch_tabular/models/stacking/stacking_model.py index 8c97f037..a0c1c47e 100644 --- a/src/pytorch_tabular/models/stacking/stacking_model.py +++ b/src/pytorch_tabular/models/stacking/stacking_model.py @@ -23,6 +23,17 @@ def instatiate_backbone(hparams, backbone_name): } ) +class StackingEmbeddingLayer(nn.Module): + def __init__(self, embedding_layers: nn.ModuleList): + super().__init__() + self.embedding_layers = embedding_layers + + def forward(self, x: dict[str, torch.Tensor]): + outputs = [] + for embedding_layer in self.embedding_layers: + em_output = embedding_layer(x) + outputs.append(em_output) + return outputs class StackingBackbone(nn.Module): def __init__(self, config: DictConfig): @@ -80,7 +91,7 @@ def _build_embedding_layer(self): embedding_layers.append(nn.Identity()) else: embedding_layers.append(backbone._build_embedding_layer()) - return embedding_layers + return StackingEmbeddingLayer(embedding_layers) def forward(self, x_list: list[torch.Tensor]): outputs = [] @@ -124,12 +135,3 @@ def embedding_layer(self): @property def head(self): return self._head - - def forward(self, x: dict[str, torch.Tensor]): - outputs = [] - for embedding_layer in self._embedding_layer: - em_output = embedding_layer(x) - outputs.append(em_output) - outputs = self._backbone(outputs) - x = self._head(outputs) - return {"logits": x} From 0e8a5324f1b1d5ae9d241829d6a1551dddef4117 Mon Sep 17 00:00:00 2001 From: taimo Date: Wed, 11 Dec 2024 22:14:12 +0900 Subject: [PATCH 04/15] refactor: remove the use of eval for passing ruff format. --- src/pytorch_tabular/models/stacking/stacking_model.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/pytorch_tabular/models/stacking/stacking_model.py b/src/pytorch_tabular/models/stacking/stacking_model.py index a0c1c47e..8fab00d3 100644 --- a/src/pytorch_tabular/models/stacking/stacking_model.py +++ b/src/pytorch_tabular/models/stacking/stacking_model.py @@ -4,6 +4,7 @@ import torch.nn as nn from omegaconf import DictConfig +import pytorch_tabular.models as models from pytorch_tabular.models import BaseModel from pytorch_tabular.models.common.heads import blocks from pytorch_tabular.models.gate import GatedAdditiveTreesBackbone @@ -11,7 +12,7 @@ def instatiate_backbone(hparams, backbone_name): - backbone_class = eval(backbone_name) + backbone_class = getattr(getattr(models, hparams._module_src.split(".")[-1]), backbone_name) class_args = list(inspect.signature(backbone_class).parameters.keys()) if "config" in class_args: return backbone_class(config=hparams) From 5a452c9beb0077d976beccfdbb2995fec3f3664a Mon Sep 17 00:00:00 2001 From: taimo Date: Wed, 11 Dec 2024 22:15:24 +0900 Subject: [PATCH 05/15] fix typo --- src/pytorch_tabular/models/stacking/stacking_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pytorch_tabular/models/stacking/stacking_model.py b/src/pytorch_tabular/models/stacking/stacking_model.py index 8fab00d3..17954acd 100644 --- a/src/pytorch_tabular/models/stacking/stacking_model.py +++ b/src/pytorch_tabular/models/stacking/stacking_model.py @@ -11,7 +11,7 @@ from pytorch_tabular.models.node import NODEBackbone -def instatiate_backbone(hparams, backbone_name): +def instantiate_backbone(hparams, backbone_name): backbone_class = getattr(getattr(models, hparams._module_src.split(".")[-1]), 
backbone_name) class_args = list(inspect.signature(backbone_class).parameters.keys()) if "config" in class_args: @@ -67,7 +67,7 @@ def _build_network(self): self.hparams.model_configs[model_i].inferred_config = self.hparams.inferred_config # instantiate backbone - _backbone = instatiate_backbone( + _backbone = instantiate_backbone( self.hparams.model_configs[model_i], self.hparams.model_configs[model_i]._backbone_name ) # set continuous_dim From d68962e8d7f85083c2281bf9adb7cc38f09af75f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 13:15:40 +0000 Subject: [PATCH 06/15] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/pytorch_tabular/models/stacking/stacking_model.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/pytorch_tabular/models/stacking/stacking_model.py b/src/pytorch_tabular/models/stacking/stacking_model.py index 17954acd..b8b95d2a 100644 --- a/src/pytorch_tabular/models/stacking/stacking_model.py +++ b/src/pytorch_tabular/models/stacking/stacking_model.py @@ -24,6 +24,7 @@ def instantiate_backbone(hparams, backbone_name): } ) + class StackingEmbeddingLayer(nn.Module): def __init__(self, embedding_layers: nn.ModuleList): super().__init__() @@ -36,6 +37,7 @@ def forward(self, x: dict[str, torch.Tensor]): outputs.append(em_output) return outputs + class StackingBackbone(nn.Module): def __init__(self, config: DictConfig): super().__init__() From 0c02ead360c1c6059f0dc55c8f2b99ca70c318a0 Mon Sep 17 00:00:00 2001 From: taimo Date: Thu, 12 Dec 2024 00:13:45 +0900 Subject: [PATCH 07/15] Add Stacking Model Documentation and Tutorial - Updated API documentation to include `StackingModelConfig` and `StackingModel`. - Added a new tutorial notebook demonstrating model stacking in PyTorch Tabular, covering setup, configuration, training, and evaluation. - Enhanced existing documentation to explain the model stacking concept and its benefits. This commit improves the usability and understanding of the stacking functionality in the library. --- docs/apidocs_model.md | 7 +- docs/models.md | 21 + docs/tutorials/16-Model Stacking.ipynb | 1486 ++++++++++++++++++++++++ 3 files changed, 1513 insertions(+), 1 deletion(-) create mode 100644 docs/tutorials/16-Model Stacking.ipynb diff --git a/docs/apidocs_model.md b/docs/apidocs_model.md index 01d8270f..d0312742 100644 --- a/docs/apidocs_model.md +++ b/docs/apidocs_model.md @@ -30,6 +30,9 @@ ::: pytorch_tabular.models.TabTransformerConfig options: heading_level: 3 +::: pytorch_tabular.models.StackingModelConfig + options: + heading_level: 3 ::: pytorch_tabular.config.ModelConfig options: heading_level: 3 @@ -66,7 +69,9 @@ ::: pytorch_tabular.models.TabTransformerModel options: heading_level: 3 - +::: pytorch_tabular.models.StackingModel + options: + heading_level: 3 ## Base Model Class ::: pytorch_tabular.models.BaseModel options: diff --git a/docs/models.md b/docs/models.md index 787746a7..48624128 100644 --- a/docs/models.md +++ b/docs/models.md @@ -253,6 +253,27 @@ All the parameters have beet set to recommended values from the paper. Let's loo **For a complete list of parameters refer to the API Docs** [pytorch_tabular.models.DANetConfig][] +## Model Stacking + +Model stacking is an ensemble learning technique that combines multiple base models to create a more powerful predictive model. 
Each base model processes the input features independently, and their outputs are concatenated before making the final prediction. This allows the model to leverage different learning patterns captured by each backbone architecture. You can use it by choosing `StackingModelConfig`. + +The following model architectures are supported for stacking: +- Category Embedding Model +- TabNet Model +- FTTransformer Model +- Gated Additive Tree Ensemble Model +- DANet Model +- AutoInt Model +- GANDALF Model +- Node Model + +All the parameters have been set to provide flexibility while maintaining ease of use. Let's look at them: + +- `model_configs`: List[ModelConfig]: List of configurations for each base model. Each config should be a valid PyTorch Tabular model config (e.g., NodeConfig, GANDALFConfig) + +**For a complete list of parameters refer to the API Docs** +[pytorch_tabular.models.StackingModelConfig][] + ## Implementing New Architectures PyTorch Tabular is very easy to extend and infinitely customizable. All the models that have been implemented in PyTorch Tabular inherits an Abstract Class `BaseModel` which is in fact a PyTorchLightning Model. diff --git a/docs/tutorials/16-Model Stacking.ipynb b/docs/tutorials/16-Model Stacking.ipynb new file mode 100644 index 00000000..4af4092c --- /dev/null +++ b/docs/tutorials/16-Model Stacking.ipynb @@ -0,0 +1,1486 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Model Stacking in PyTorch Tabular\n", + "\n", + "This page demonstrates how to use model stacking functionality in PyTorch Tabular to combine multiple models for better predictions.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Setup and Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import warnings\n", + "warnings.filterwarnings(\"ignore\")\n", + "import pandas as pd\n", + "import numpy as np\n", + "from sklearn.model_selection import train_test_split\n", + "from pytorch_tabular import TabularModel\n", + "from pytorch_tabular.models import (\n", + "CategoryEmbeddingModelConfig,\n", + "FTTransformerConfig,\n", + "TabNetModelConfig\n", + ")\n", + "from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig\n", + "from pytorch_tabular.models.stacking import StackingModelConfig\n", + "from pytorch_tabular.utils import make_mixed_dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create synthetic classification dataset & split into train, validation and test sets" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "data, cat_col_names, num_col_names = make_mixed_dataset(\n", + " task=\"classification\", n_samples=3000, n_features=7, n_categories=4\n", + ")\n", + "\n", + "train, test = train_test_split(data, random_state=42)\n", + "train, valid = train_test_split(train, random_state=42)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Common configurations" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "data_config = DataConfig(\n", + " target=[\"target\"],\n", + " continuous_cols=num_col_names,\n", + " categorical_cols=cat_col_names,\n", + ")\n", + "trainer_config = TrainerConfig(\n", + " batch_size=1024,\n", + " max_epochs=20,\n", + " early_stopping=\"valid_accuracy\",\n", + " early_stopping_mode=\"max\",\n", + " 
early_stopping_patience=3,\n", + " checkpoints=\"valid_accuracy\",\n", + " load_best=True,\n", + ")\n", + "optimizer_config = OptimizerConfig()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configure individual models" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "model_config_1 = CategoryEmbeddingModelConfig(\n", + " task=\"classification\",\n", + " layers=\"128-64-32\",\n", + " activation=\"ReLU\",\n", + " learning_rate=1e-3\n", + ")\n", + "model_config_2 = FTTransformerConfig(\n", + " task=\"classification\",\n", + " input_embed_dim=32,\n", + " num_attn_blocks=2,\n", + " num_heads=4,\n", + " learning_rate=1e-3\n", + ")\n", + "model_config_3 = TabNetModelConfig(\n", + " task=\"classification\",\n", + " n_d=8,\n", + " n_a=8,\n", + " n_steps=3,\n", + " learning_rate=1e-3\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Configure Stacking Model\n", + "\n", + "Now let's set up the stacking configuration that will combine these models:" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "stacking_config = StackingModelConfig(\n", + " task=\"classification\",\n", + " model_configs=[\n", + " model_config_1,\n", + " model_config_2,\n", + " model_config_3\n", + " ],\n", + " head=\"LinearHead\",\n", + " head_config={\n", + " \"layers\": \"64\",\n", + " \"activation\": \"ReLU\",\n", + " \"dropout\": 0.1\n", + " },\n", + " learning_rate=1e-3\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Train Stacking Model" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
2024-12-12 00:02:35,338 - {pytorch_tabular.tabular_model:147} - INFO - Experiment Tracking is turned off           \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:02:35\u001b[0m,\u001b[1;36m338\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m147\u001b[0m\u001b[1m}\u001b[0m - INFO - Experiment Tracking is turned off \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Seed set to 42\n" + ] + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:02:35,388 - {pytorch_tabular.tabular_model:549} - INFO - Preparing the DataLoaders                   \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:02:35\u001b[0m,\u001b[1;36m388\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m549\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the DataLoaders \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:02:35,394 - {pytorch_tabular.tabular_datamodule:527} - INFO - Setting up the datamodule for          \n",
+       "classification task                                                                                                \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:02:35\u001b[0m,\u001b[1;36m394\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_datamodul\u001b[1;92me:527\u001b[0m\u001b[1m}\u001b[0m - INFO - Setting up the datamodule for \n", + "classification task \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:02:35,462 - {pytorch_tabular.tabular_model:600} - INFO - Preparing the Model: StackingModel          \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:02:35\u001b[0m,\u001b[1;36m462\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m600\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the Model: StackingModel \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:02:35,516 - {pytorch_tabular.tabular_model:343} - INFO - Preparing the Trainer                       \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:02:35\u001b[0m,\u001b[1;36m516\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m343\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the Trainer \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True (cuda), used: True\n", + "TPU available: False, using: 0 TPU cores\n", + "HPU available: False, using: 0 HPUs\n" + ] + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:02:35,813 - {pytorch_tabular.tabular_model:679} - INFO - Training Started                            \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:02:35\u001b[0m,\u001b[1;36m813\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m679\u001b[0m\u001b[1m}\u001b[0m - INFO - Training Started \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n" + ] + }, + { + "data": { + "text/html": [ + "
┏━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━┓\n",
+       "┃    Name              Type                    Params  Mode  ┃\n",
+       "┡━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━┩\n",
+       "│ 0 │ _backbone        │ StackingBackbone       │ 77.2 K │ train │\n",
+       "│ 1 │ _embedding_layer │ StackingEmbeddingLayer │    917 │ train │\n",
+       "│ 2 │ _head            │ LinearHead             │ 12.5 K │ train │\n",
+       "│ 3 │ loss             │ CrossEntropyLoss       │      0 │ train │\n",
+       "└───┴──────────────────┴────────────────────────┴────────┴───────┘\n",
+       "
\n" + ], + "text/plain": [ + "┏━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━┓\n", + "┃\u001b[1;35m \u001b[0m\u001b[1;35m \u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mName \u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mType \u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mParams\u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mMode \u001b[0m\u001b[1;35m \u001b[0m┃\n", + "┡━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━┩\n", + "│\u001b[2m \u001b[0m\u001b[2m0\u001b[0m\u001b[2m \u001b[0m│ _backbone │ StackingBackbone │ 77.2 K │ train │\n", + "│\u001b[2m \u001b[0m\u001b[2m1\u001b[0m\u001b[2m \u001b[0m│ _embedding_layer │ StackingEmbeddingLayer │ 917 │ train │\n", + "│\u001b[2m \u001b[0m\u001b[2m2\u001b[0m\u001b[2m \u001b[0m│ _head │ LinearHead │ 12.5 K │ train │\n", + "│\u001b[2m \u001b[0m\u001b[2m3\u001b[0m\u001b[2m \u001b[0m│ loss │ CrossEntropyLoss │ 0 │ train │\n", + "└───┴──────────────────┴────────────────────────┴────────┴───────┘\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Trainable params: 90.6 K                                                                                           \n",
+       "Non-trainable params: 0                                                                                            \n",
+       "Total params: 90.6 K                                                                                               \n",
+       "Total estimated model params size (MB): 0                                                                          \n",
+       "Modules in train mode: 188                                                                                         \n",
+       "Modules in eval mode: 0                                                                                            \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1mTrainable params\u001b[0m: 90.6 K \n", + "\u001b[1mNon-trainable params\u001b[0m: 0 \n", + "\u001b[1mTotal params\u001b[0m: 90.6 K \n", + "\u001b[1mTotal estimated model params size (MB)\u001b[0m: 0 \n", + "\u001b[1mModules in train mode\u001b[0m: 188 \n", + "\u001b[1mModules in eval mode\u001b[0m: 0 \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "3cd6f3938b1f419c8b07eb89ffa13bf4", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       "
2024-12-12 00:02:39,304 - {pytorch_tabular.tabular_model:692} - INFO - Training the model completed                \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:02:39\u001b[0m,\u001b[1;36m304\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m692\u001b[0m\u001b[1m}\u001b[0m - INFO - Training the model completed \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:02:39,307 - {pytorch_tabular.tabular_model:1533} - INFO - Loading the best model                     \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:02:39\u001b[0m,\u001b[1;36m307\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m1533\u001b[0m\u001b[1m}\u001b[0m - INFO - Loading the best model \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "stacking_model = TabularModel(\n", + " data_config=data_config,\n", + " model_config=stacking_config,\n", + " optimizer_config=optimizer_config,\n", + " trainer_config=trainer_config,\n", + ")\n", + "stacking_model.fit(\n", + " train=train,\n", + " validation=valid\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluate Results" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b1616690de674da8bbc8cc985f19686a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n",
+       "┃        Test metric               DataLoader 0        ┃\n",
+       "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n",
+       "│       test_accuracy           0.5960000157356262     │\n",
+       "│         test_loss             0.7419928312301636     │\n",
+       "│        test_loss_0            0.7419928312301636     │\n",
+       "└───────────────────────────┴───────────────────────────┘\n",
+       "
\n" + ], + "text/plain": [ + "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", + "┃\u001b[1m \u001b[0m\u001b[1m Test metric \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m DataLoader 0 \u001b[0m\u001b[1m \u001b[0m┃\n", + "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", + "│\u001b[36m \u001b[0m\u001b[36m test_accuracy \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.5960000157356262 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test_loss \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.7419928312301636 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test_loss_0 \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.7419928312301636 \u001b[0m\u001b[35m \u001b[0m│\n", + "└───────────────────────────┴───────────────────────────┘\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "predictions = stacking_model.predict(test)\n",
+    "stacking_metrics = stacking_model.evaluate(test)[0]\n",
+    "stacking_acc = stacking_metrics[\"test_accuracy\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Compare with individual models"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 31,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def train_and_evaluate_model(model_config, name):\n",
+    "    model = TabularModel(\n",
+    "    data_config=data_config,\n",
+    "    model_config=model_config,\n",
+    "    optimizer_config=optimizer_config,\n",
+    "    trainer_config=trainer_config,\n",
+    "    )\n",
+    "    model.fit(train=train, validation=valid)\n",
+    "    metrics = model.evaluate(test)\n",
+    "    print(f\"\\n{name} Metrics:\")\n",
+    "    print(metrics)\n",
+    "    return metrics"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 35,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "
2024-12-12 00:09:01,257 - {pytorch_tabular.tabular_model:147} - INFO - Experiment Tracking is turned off           \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:01\u001b[0m,\u001b[1;36m257\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m147\u001b[0m\u001b[1m}\u001b[0m - INFO - Experiment Tracking is turned off \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Seed set to 42\n" + ] + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:01,320 - {pytorch_tabular.tabular_model:549} - INFO - Preparing the DataLoaders                   \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:01\u001b[0m,\u001b[1;36m320\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m549\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the DataLoaders \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:01,340 - {pytorch_tabular.tabular_datamodule:527} - INFO - Setting up the datamodule for          \n",
+       "classification task                                                                                                \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:01\u001b[0m,\u001b[1;36m340\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_datamodul\u001b[1;92me:527\u001b[0m\u001b[1m}\u001b[0m - INFO - Setting up the datamodule for \n", + "classification task \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:01,376 - {pytorch_tabular.tabular_model:600} - INFO - Preparing the Model: CategoryEmbeddingModel \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:01\u001b[0m,\u001b[1;36m376\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m600\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the Model: CategoryEmbeddingModel \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:01,411 - {pytorch_tabular.tabular_model:343} - INFO - Preparing the Trainer                       \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:01\u001b[0m,\u001b[1;36m411\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m343\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the Trainer \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True (cuda), used: True\n", + "TPU available: False, using: 0 TPU cores\n", + "HPU available: False, using: 0 HPUs\n" + ] + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:01,638 - {pytorch_tabular.tabular_model:679} - INFO - Training Started                            \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:01\u001b[0m,\u001b[1;36m638\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m679\u001b[0m\u001b[1m}\u001b[0m - INFO - Training Started \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n" + ] + }, + { + "data": { + "text/html": [ + "
┏━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━┓\n",
+       "┃    Name              Type                       Params  Mode  ┃\n",
+       "┡━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━┩\n",
+       "│ 0 │ _backbone        │ CategoryEmbeddingBackbone │ 12.1 K │ train │\n",
+       "│ 1 │ _embedding_layer │ Embedding1dLayer          │     53 │ train │\n",
+       "│ 2 │ head             │ LinearHead                │     66 │ train │\n",
+       "│ 3 │ loss             │ CrossEntropyLoss          │      0 │ train │\n",
+       "└───┴──────────────────┴───────────────────────────┴────────┴───────┘\n",
+       "
\n" + ], + "text/plain": [ + "┏━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━┓\n", + "┃\u001b[1;35m \u001b[0m\u001b[1;35m \u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mName \u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mType \u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mParams\u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mMode \u001b[0m\u001b[1;35m \u001b[0m┃\n", + "┡━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━┩\n", + "│\u001b[2m \u001b[0m\u001b[2m0\u001b[0m\u001b[2m \u001b[0m│ _backbone │ CategoryEmbeddingBackbone │ 12.1 K │ train │\n", + "│\u001b[2m \u001b[0m\u001b[2m1\u001b[0m\u001b[2m \u001b[0m│ _embedding_layer │ Embedding1dLayer │ 53 │ train │\n", + "│\u001b[2m \u001b[0m\u001b[2m2\u001b[0m\u001b[2m \u001b[0m│ head │ LinearHead │ 66 │ train │\n", + "│\u001b[2m \u001b[0m\u001b[2m3\u001b[0m\u001b[2m \u001b[0m│ loss │ CrossEntropyLoss │ 0 │ train │\n", + "└───┴──────────────────┴───────────────────────────┴────────┴───────┘\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Trainable params: 12.2 K                                                                                           \n",
+       "Non-trainable params: 0                                                                                            \n",
+       "Total params: 12.2 K                                                                                               \n",
+       "Total estimated model params size (MB): 0                                                                          \n",
+       "Modules in train mode: 19                                                                                          \n",
+       "Modules in eval mode: 0                                                                                            \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1mTrainable params\u001b[0m: 12.2 K \n", + "\u001b[1mNon-trainable params\u001b[0m: 0 \n", + "\u001b[1mTotal params\u001b[0m: 12.2 K \n", + "\u001b[1mTotal estimated model params size (MB)\u001b[0m: 0 \n", + "\u001b[1mModules in train mode\u001b[0m: 19 \n", + "\u001b[1mModules in eval mode\u001b[0m: 0 \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "03ed36b48da24bb19f036d1db4422cb7", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "`Trainer.fit` stopped: `max_epochs=20` reached.\n" + ] + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       "
2024-12-12 00:09:04,935 - {pytorch_tabular.tabular_model:692} - INFO - Training the model completed                \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:04\u001b[0m,\u001b[1;36m935\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m692\u001b[0m\u001b[1m}\u001b[0m - INFO - Training the model completed \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:04,938 - {pytorch_tabular.tabular_model:1533} - INFO - Loading the best model                     \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:04\u001b[0m,\u001b[1;36m938\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m1533\u001b[0m\u001b[1m}\u001b[0m - INFO - Loading the best model \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "bdcb7befb3b340a895a5399394780d7e", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n",
+       "┃        Test metric               DataLoader 0        ┃\n",
+       "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n",
+       "│       test_accuracy           0.4586666524410248     │\n",
+       "│         test_loss             0.8828091025352478     │\n",
+       "│        test_loss_0            0.8828091025352478     │\n",
+       "└───────────────────────────┴───────────────────────────┘\n",
+       "
\n" + ], + "text/plain": [ + "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", + "┃\u001b[1m \u001b[0m\u001b[1m Test metric \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m DataLoader 0 \u001b[0m\u001b[1m \u001b[0m┃\n", + "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", + "│\u001b[36m \u001b[0m\u001b[36m test_accuracy \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.4586666524410248 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test_loss \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.8828091025352478 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test_loss_0 \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.8828091025352478 \u001b[0m\u001b[35m \u001b[0m│\n", + "└───────────────────────────┴───────────────────────────┘\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "Category Embedding Metrics:\n",
+      "[{'test_loss_0': 0.8828091025352478, 'test_loss': 0.8828091025352478, 'test_accuracy': 0.4586666524410248}]\n"
+     ]
+    },
+    {
+     "data": {
+      "text/html": [
+       "
2024-12-12 00:09:05,183 - {pytorch_tabular.tabular_model:147} - INFO - Experiment Tracking is turned off           \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:05\u001b[0m,\u001b[1;36m183\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m147\u001b[0m\u001b[1m}\u001b[0m - INFO - Experiment Tracking is turned off \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Seed set to 42\n" + ] + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:05,263 - {pytorch_tabular.tabular_model:549} - INFO - Preparing the DataLoaders                   \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:05\u001b[0m,\u001b[1;36m263\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m549\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the DataLoaders \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:05,272 - {pytorch_tabular.tabular_datamodule:527} - INFO - Setting up the datamodule for          \n",
+       "classification task                                                                                                \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:05\u001b[0m,\u001b[1;36m272\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_datamodul\u001b[1;92me:527\u001b[0m\u001b[1m}\u001b[0m - INFO - Setting up the datamodule for \n", + "classification task \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:05,294 - {pytorch_tabular.tabular_model:600} - INFO - Preparing the Model: FTTransformerModel     \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:05\u001b[0m,\u001b[1;36m294\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m600\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the Model: FTTransformerModel \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:05,323 - {pytorch_tabular.tabular_model:343} - INFO - Preparing the Trainer                       \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:05\u001b[0m,\u001b[1;36m323\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m343\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the Trainer \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True (cuda), used: True\n", + "TPU available: False, using: 0 TPU cores\n", + "HPU available: False, using: 0 HPUs\n" + ] + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:05,623 - {pytorch_tabular.tabular_model:679} - INFO - Training Started                            \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:05\u001b[0m,\u001b[1;36m623\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m679\u001b[0m\u001b[1m}\u001b[0m - INFO - Training Started \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n" + ] + }, + { + "data": { + "text/html": [ + "
┏━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━┓\n",
+       "┃    Name              Type                   Params  Mode  ┃\n",
+       "┡━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━┩\n",
+       "│ 0 │ _backbone        │ FTTransformerBackbone │ 57.7 K │ train │\n",
+       "│ 1 │ _embedding_layer │ Embedding2dLayer      │    864 │ train │\n",
+       "│ 2 │ _head            │ LinearHead            │     66 │ train │\n",
+       "│ 3 │ loss             │ CrossEntropyLoss      │      0 │ train │\n",
+       "└───┴──────────────────┴───────────────────────┴────────┴───────┘\n",
+       "
\n" + ], + "text/plain": [ + "┏━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━┓\n", + "┃\u001b[1;35m \u001b[0m\u001b[1;35m \u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mName \u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mType \u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mParams\u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mMode \u001b[0m\u001b[1;35m \u001b[0m┃\n", + "┡━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━┩\n", + "│\u001b[2m \u001b[0m\u001b[2m0\u001b[0m\u001b[2m \u001b[0m│ _backbone │ FTTransformerBackbone │ 57.7 K │ train │\n", + "│\u001b[2m \u001b[0m\u001b[2m1\u001b[0m\u001b[2m \u001b[0m│ _embedding_layer │ Embedding2dLayer │ 864 │ train │\n", + "│\u001b[2m \u001b[0m\u001b[2m2\u001b[0m\u001b[2m \u001b[0m│ _head │ LinearHead │ 66 │ train │\n", + "│\u001b[2m \u001b[0m\u001b[2m3\u001b[0m\u001b[2m \u001b[0m│ loss │ CrossEntropyLoss │ 0 │ train │\n", + "└───┴──────────────────┴───────────────────────┴────────┴───────┘\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Trainable params: 58.6 K                                                                                           \n",
+       "Non-trainable params: 0                                                                                            \n",
+       "Total params: 58.6 K                                                                                               \n",
+       "Total estimated model params size (MB): 0                                                                          \n",
+       "Modules in train mode: 56                                                                                          \n",
+       "Modules in eval mode: 0                                                                                            \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1mTrainable params\u001b[0m: 58.6 K \n", + "\u001b[1mNon-trainable params\u001b[0m: 0 \n", + "\u001b[1mTotal params\u001b[0m: 58.6 K \n", + "\u001b[1mTotal estimated model params size (MB)\u001b[0m: 0 \n", + "\u001b[1mModules in train mode\u001b[0m: 56 \n", + "\u001b[1mModules in eval mode\u001b[0m: 0 \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "62184d0ac93049058c153f2e93518d0f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       "
2024-12-12 00:09:07,482 - {pytorch_tabular.tabular_model:692} - INFO - Training the model completed                \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:07\u001b[0m,\u001b[1;36m482\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m692\u001b[0m\u001b[1m}\u001b[0m - INFO - Training the model completed \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:07,488 - {pytorch_tabular.tabular_model:1533} - INFO - Loading the best model                     \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:07\u001b[0m,\u001b[1;36m488\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m1533\u001b[0m\u001b[1m}\u001b[0m - INFO - Loading the best model \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "5a482fb9cd5045e3ada1beac9c114d97", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n",
+       "┃        Test metric               DataLoader 0        ┃\n",
+       "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n",
+       "│       test_accuracy           0.5546666383743286     │\n",
+       "│         test_loss             0.6846821904182434     │\n",
+       "│        test_loss_0            0.6846821904182434     │\n",
+       "└───────────────────────────┴───────────────────────────┘\n",
+       "
\n" + ], + "text/plain": [ + "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", + "┃\u001b[1m \u001b[0m\u001b[1m Test metric \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m DataLoader 0 \u001b[0m\u001b[1m \u001b[0m┃\n", + "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", + "│\u001b[36m \u001b[0m\u001b[36m test_accuracy \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.5546666383743286 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test_loss \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.6846821904182434 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test_loss_0 \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.6846821904182434 \u001b[0m\u001b[35m \u001b[0m│\n", + "└───────────────────────────┴───────────────────────────┘\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "FT Transformer Metrics:\n",
+      "[{'test_loss_0': 0.6846821904182434, 'test_loss': 0.6846821904182434, 'test_accuracy': 0.5546666383743286}]\n"
+     ]
+    },
+    {
+     "data": {
+      "text/html": [
+       "
2024-12-12 00:09:07,824 - {pytorch_tabular.tabular_model:147} - INFO - Experiment Tracking is turned off           \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:07\u001b[0m,\u001b[1;36m824\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m147\u001b[0m\u001b[1m}\u001b[0m - INFO - Experiment Tracking is turned off \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Seed set to 42\n" + ] + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:07,863 - {pytorch_tabular.tabular_model:549} - INFO - Preparing the DataLoaders                   \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:07\u001b[0m,\u001b[1;36m863\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m549\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the DataLoaders \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:07,870 - {pytorch_tabular.tabular_datamodule:527} - INFO - Setting up the datamodule for          \n",
+       "classification task                                                                                                \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:07\u001b[0m,\u001b[1;36m870\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_datamodul\u001b[1;92me:527\u001b[0m\u001b[1m}\u001b[0m - INFO - Setting up the datamodule for \n", + "classification task \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:07,900 - {pytorch_tabular.tabular_model:600} - INFO - Preparing the Model: TabNetModel            \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:07\u001b[0m,\u001b[1;36m900\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m600\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the Model: TabNetModel \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:07,965 - {pytorch_tabular.tabular_model:343} - INFO - Preparing the Trainer                       \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:07\u001b[0m,\u001b[1;36m965\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m343\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the Trainer \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True (cuda), used: True\n", + "TPU available: False, using: 0 TPU cores\n", + "HPU available: False, using: 0 HPUs\n" + ] + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:08,200 - {pytorch_tabular.tabular_model:679} - INFO - Training Started                            \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:08\u001b[0m,\u001b[1;36m200\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m679\u001b[0m\u001b[1m}\u001b[0m - INFO - Training Started \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n" + ] + }, + { + "data": { + "text/html": [ + "
┏━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━┓\n",
+       "┃    Name              Type              Params  Mode  ┃\n",
+       "┡━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━┩\n",
+       "│ 0 │ _embedding_layer │ Identity         │      0 │ train │\n",
+       "│ 1 │ _backbone        │ TabNetBackbone   │  6.4 K │ train │\n",
+       "│ 2 │ _head            │ Identity         │      0 │ train │\n",
+       "│ 3 │ loss             │ CrossEntropyLoss │      0 │ train │\n",
+       "└───┴──────────────────┴──────────────────┴────────┴───────┘\n",
+       "
\n" + ], + "text/plain": [ + "┏━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━┓\n", + "┃\u001b[1;35m \u001b[0m\u001b[1;35m \u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mName \u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mType \u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mParams\u001b[0m\u001b[1;35m \u001b[0m┃\u001b[1;35m \u001b[0m\u001b[1;35mMode \u001b[0m\u001b[1;35m \u001b[0m┃\n", + "┡━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━┩\n", + "│\u001b[2m \u001b[0m\u001b[2m0\u001b[0m\u001b[2m \u001b[0m│ _embedding_layer │ Identity │ 0 │ train │\n", + "│\u001b[2m \u001b[0m\u001b[2m1\u001b[0m\u001b[2m \u001b[0m│ _backbone │ TabNetBackbone │ 6.4 K │ train │\n", + "│\u001b[2m \u001b[0m\u001b[2m2\u001b[0m\u001b[2m \u001b[0m│ _head │ Identity │ 0 │ train │\n", + "│\u001b[2m \u001b[0m\u001b[2m3\u001b[0m\u001b[2m \u001b[0m│ loss │ CrossEntropyLoss │ 0 │ train │\n", + "└───┴──────────────────┴──────────────────┴────────┴───────┘\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
Trainable params: 6.4 K                                                                                            \n",
+       "Non-trainable params: 0                                                                                            \n",
+       "Total params: 6.4 K                                                                                                \n",
+       "Total estimated model params size (MB): 0                                                                          \n",
+       "Modules in train mode: 111                                                                                         \n",
+       "Modules in eval mode: 0                                                                                            \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1mTrainable params\u001b[0m: 6.4 K \n", + "\u001b[1mNon-trainable params\u001b[0m: 0 \n", + "\u001b[1mTotal params\u001b[0m: 6.4 K \n", + "\u001b[1mTotal estimated model params size (MB)\u001b[0m: 0 \n", + "\u001b[1mModules in train mode\u001b[0m: 111 \n", + "\u001b[1mModules in eval mode\u001b[0m: 0 \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "2e27939fc57d4c9585a3252b035e74f8", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       "
2024-12-12 00:09:09,766 - {pytorch_tabular.tabular_model:692} - INFO - Training the model completed                \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:09\u001b[0m,\u001b[1;36m766\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m692\u001b[0m\u001b[1m}\u001b[0m - INFO - Training the model completed \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:09:09,767 - {pytorch_tabular.tabular_model:1533} - INFO - Loading the best model                     \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:09:09\u001b[0m,\u001b[1;36m767\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m1533\u001b[0m\u001b[1m}\u001b[0m - INFO - Loading the best model \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "8a98c3a2c4ce4bcaac279982ec86bd8f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n",
+       "┃        Test metric               DataLoader 0        ┃\n",
+       "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n",
+       "│       test_accuracy           0.4346666634082794     │\n",
+       "│         test_loss             1.1570961475372314     │\n",
+       "│        test_loss_0            1.1570961475372314     │\n",
+       "└───────────────────────────┴───────────────────────────┘\n",
+       "
\n" + ], + "text/plain": [ + "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", + "┃\u001b[1m \u001b[0m\u001b[1m Test metric \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m DataLoader 0 \u001b[0m\u001b[1m \u001b[0m┃\n", + "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", + "│\u001b[36m \u001b[0m\u001b[36m test_accuracy \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.4346666634082794 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test_loss \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 1.1570961475372314 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test_loss_0 \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 1.1570961475372314 \u001b[0m\u001b[35m \u001b[0m│\n", + "└───────────────────────────┴───────────────────────────┘\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n"
+      ],
+      "text/plain": []
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "TabNet Metrics:\n",
+      "[{'test_loss_0': 1.1570961475372314, 'test_loss': 1.1570961475372314, 'test_accuracy': 0.4346666634082794}]\n"
+     ]
+    }
+   ],
+   "source": [
+    "ce_metrics = train_and_evaluate_model(model_config_1, \"Category Embedding\")[0]\n",
+    "ft_metrics = train_and_evaluate_model(model_config_2, \"FT Transformer\")[0]\n",
+    "tab_metrics = train_and_evaluate_model(model_config_3, \"TabNet\")[0]\n",
+    "ce_acc = ce_metrics[\"test_accuracy\"]\n",
+    "ft_acc = ft_metrics[\"test_accuracy\"]\n",
+    "tab_acc = tab_metrics[\"test_accuracy\"]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 37,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Stacking Model Test Accuracy: 0.5960000157356262\n",
+      "Category Embedding Model Test Accucacy: 0.4586666524410248\n",
+      "FT Transformer Model Test Accuracy: 0.5546666383743286\n",
+      "TabNet Model Test Accuracy: 0.4346666634082794\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(\"Stacking Model Test Accuracy: {}\".format(stacking_acc))\n",
+    "print(\"Category Embedding Model Test Accucacy: {}\".format(ce_acc))\n",
+    "print(\"FT Transformer Model Test Accuracy: {}\".format(ft_acc))\n",
+    "print(\"TabNet Model Test Accuracy: {}\".format(tab_acc))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Save the stacking model & load it"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 22,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "
2024-12-12 00:00:31,524 - {pytorch_tabular.tabular_model:1579} - WARNING - Directory is not empty. Overwriting the \n",
+       "contents.                                                                                                          \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:00:31\u001b[0m,\u001b[1;36m524\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m1579\u001b[0m\u001b[1m}\u001b[0m - WARNING - Directory is not empty. Overwriting the \n", + "contents. \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "stacking_model.save_model(\"stacking_model\")" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
2024-12-12 00:00:32,437 - {pytorch_tabular.tabular_model:172} - INFO - Experiment Tracking is turned off           \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:00:32\u001b[0m,\u001b[1;36m437\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m172\u001b[0m\u001b[1m}\u001b[0m - INFO - Experiment Tracking is turned off \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
2024-12-12 00:00:32,452 - {pytorch_tabular.tabular_model:343} - INFO - Preparing the Trainer                       \n",
+       "
\n" + ], + "text/plain": [ + "\u001b[1;36m2024\u001b[0m-\u001b[1;36m12\u001b[0m-\u001b[1;36m12\u001b[0m \u001b[1;92m00:00:32\u001b[0m,\u001b[1;36m452\u001b[0m - \u001b[1m{\u001b[0mpytorch_tabular.tabular_model:\u001b[1;36m343\u001b[0m\u001b[1m}\u001b[0m - INFO - Preparing the Trainer \n" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Trainer already configured with model summary callbacks: []. Skipping setting a default `ModelSummary` callback.\n", + "GPU available: True (cuda), used: True\n", + "TPU available: False, using: 0 TPU cores\n", + "HPU available: False, using: 0 HPUs\n" + ] + } + ], + "source": [ + "loaded_model = TabularModel.load_model(\"stacking_model\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Key Points About Stacking\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "1. The stacking model combines predictions from multiple base models into a final prediction\n", + "2. Each base model can have its own architecture and hyperparameters\n", + "3. The head layer combines the outputs from all base models\n", + "4. Base models are trained simultaneously\n", + "5. The stacking model can often achieve better performance than individual models\n", + "\n", + "## Tips for Better Stacking Results\n", + "\n", + "1. Use diverse base models that capture different aspects of the data\n", + "2. Experiment with different head architectures\n", + "3. Consider using cross-validation for more robust stacking\n", + "4. Balance model complexity with training time\n", + "5. Monitor individual model performances to ensure they contribute meaningfully\n", + "\n", + "This example demonstrates basic stacking functionality. For production use cases, you may want to:\n", + "- Use cross-validation\n", + "- Implement more sophisticated ensemble techniques\n", + "- Add custom metrics\n", + "- Tune hyperparameters for both base models and stacking head" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From 69e0c43f7db998bb36eaf8dca105ee65fecd848d Mon Sep 17 00:00:00 2001 From: taimo Date: Thu, 12 Dec 2024 00:17:00 +0900 Subject: [PATCH 08/15] Refactor: Remove GatedAdditiveTreeEnsembleConfig from model configuration This commit removes the GatedAdditiveTreeEnsembleConfig lambda function from the get_model_configs function in the test_model_stacking.py file, streamlining the model configuration process. This change enhances code clarity and focuses on the relevant model configurations for stacking. 
--- tests/test_model_stacking.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/test_model_stacking.py b/tests/test_model_stacking.py index d5ea9709..629c19a4 100644 --- a/tests/test_model_stacking.py +++ b/tests/test_model_stacking.py @@ -43,9 +43,6 @@ def get_model_configs(task): lambda task: GANDALFConfig( task=task, ), - lambda task: GatedAdditiveTreeEnsembleConfig( - task=task, - ), lambda task: NodeConfig( task=task, ), From ba375fd2f431da34a476bed2b086a54057ca54c3 Mon Sep 17 00:00:00 2001 From: taimo Date: Thu, 12 Dec 2024 00:17:27 +0900 Subject: [PATCH 09/15] Update mkdocs.yml to include new Model Stacking section in documentation - Added a new entry for "Model Stacking" in the navigation structure. - Included a link to the tutorial notebook "tutorials/16-Model Stacking.ipynb" for users to learn about model stacking. This change enhances the documentation by providing users with direct access to resources related to model stacking. --- mkdocs.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mkdocs.yml b/mkdocs.yml index 59a38a83..7b8f5122 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -24,6 +24,8 @@ nav: - SHAP, Deep LIFT and so on through Captum Integration: "tutorials/14-Explainability.ipynb" - Custom PyTorch Models: - Implementing New Supervised Architectures: "tutorials/04-Implementing New Architectures.ipynb" + - Model Stacking: + - Model Stacking: "tutorials/16-Model Stacking.ipynb" - Other Features: - Using Neural Categorical Embeddings in Scikit-Learn Workflows: "tutorials/03-Neural Embedding in Scikit-Learn Workflows.ipynb" - Self-Supervised Learning using Denoising Autoencoders: "tutorials/08-Self-Supervised Learning-DAE.ipynb" From 6e069469552be630bbca139bd1726ddea77126bd Mon Sep 17 00:00:00 2001 From: taimo Date: Thu, 12 Dec 2024 00:31:47 +0900 Subject: [PATCH 10/15] Refactor mkdocs.yml to streamline navigation structure - Removed unnecessary indentation for the "Model Stacking" entry in the navigation. - Maintained the link to the tutorial notebook "tutorials/16-Model Stacking.ipynb" for user access. This change improves the clarity of the documentation structure without altering the content. 
--- mkdocs.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mkdocs.yml b/mkdocs.yml index 7b8f5122..9f3aeca7 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -24,8 +24,7 @@ nav: - SHAP, Deep LIFT and so on through Captum Integration: "tutorials/14-Explainability.ipynb" - Custom PyTorch Models: - Implementing New Supervised Architectures: "tutorials/04-Implementing New Architectures.ipynb" - - Model Stacking: - - Model Stacking: "tutorials/16-Model Stacking.ipynb" + - Model Stacking: "tutorials/16-Model Stacking.ipynb" - Other Features: - Using Neural Categorical Embeddings in Scikit-Learn Workflows: "tutorials/03-Neural Embedding in Scikit-Learn Workflows.ipynb" - Self-Supervised Learning using Denoising Autoencoders: "tutorials/08-Self-Supervised Learning-DAE.ipynb" From 69d5bcc487cff4e18b788726b4690171b587e8c0 Mon Sep 17 00:00:00 2001 From: taimo Date: Fri, 13 Dec 2024 23:23:47 +0900 Subject: [PATCH 11/15] Refactor StackingModelConfig to simplify model_configs type annotation - Changed the type annotation of model_configs from list[ModelConfig] to list --- src/pytorch_tabular/models/stacking/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pytorch_tabular/models/stacking/config.py b/src/pytorch_tabular/models/stacking/config.py index 23be156e..586cddab 100644 --- a/src/pytorch_tabular/models/stacking/config.py +++ b/src/pytorch_tabular/models/stacking/config.py @@ -14,7 +14,7 @@ class StackingModelConfig(ModelConfig): """ - model_configs: list[ModelConfig] = field(default_factory=list, metadata={"help": "List of model configs to stack"}) + model_configs: list = field(default_factory=list, metadata={"help": "List of model configs to stack"}) _module_src: str = field(default="models.stacking") _model_name: str = field(default="StackingModel") _backbone_name: str = field(default="StackingBackbone") From b495269bcb723ef580d0a653c4594976dcef2ed1 Mon Sep 17 00:00:00 2001 From: taimo Date: Sat, 14 Dec 2024 09:39:42 +0900 Subject: [PATCH 12/15] Refactor StackingBackbone forward method to remove type annotation --- src/pytorch_tabular/models/stacking/stacking_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pytorch_tabular/models/stacking/stacking_model.py b/src/pytorch_tabular/models/stacking/stacking_model.py index b8b95d2a..461b6065 100644 --- a/src/pytorch_tabular/models/stacking/stacking_model.py +++ b/src/pytorch_tabular/models/stacking/stacking_model.py @@ -96,7 +96,7 @@ def _build_embedding_layer(self): embedding_layers.append(backbone._build_embedding_layer()) return StackingEmbeddingLayer(embedding_layers) - def forward(self, x_list: list[torch.Tensor]): + def forward(self, x_list): outputs = [] for i, backbone in enumerate(self._backbones): bb_output = backbone(x_list[i]) From 86df3ad74201d98272a9b15355e7a29fa126d0aa Mon Sep 17 00:00:00 2001 From: taimo Date: Sat, 14 Dec 2024 17:08:08 +0900 Subject: [PATCH 13/15] Refactor StackingEmbeddingLayer to remove type annotation from forward method --- src/pytorch_tabular/models/stacking/stacking_model.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/pytorch_tabular/models/stacking/stacking_model.py b/src/pytorch_tabular/models/stacking/stacking_model.py index 461b6065..691f577e 100644 --- a/src/pytorch_tabular/models/stacking/stacking_model.py +++ b/src/pytorch_tabular/models/stacking/stacking_model.py @@ -30,14 +30,13 @@ def __init__(self, embedding_layers: nn.ModuleList): super().__init__() self.embedding_layers = embedding_layers - def 
forward(self, x: dict[str, torch.Tensor]): + def forward(self, x): outputs = [] for embedding_layer in self.embedding_layers: em_output = embedding_layer(x) outputs.append(em_output) return outputs - class StackingBackbone(nn.Module): def __init__(self, config: DictConfig): super().__init__() From 996e382ed53e46e11954717387e62145a1626df9 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 14 Dec 2024 08:08:21 +0000 Subject: [PATCH 14/15] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/pytorch_tabular/models/stacking/stacking_model.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pytorch_tabular/models/stacking/stacking_model.py b/src/pytorch_tabular/models/stacking/stacking_model.py index 691f577e..a1090089 100644 --- a/src/pytorch_tabular/models/stacking/stacking_model.py +++ b/src/pytorch_tabular/models/stacking/stacking_model.py @@ -37,6 +37,7 @@ def forward(self, x): outputs.append(em_output) return outputs + class StackingBackbone(nn.Module): def __init__(self, config: DictConfig): super().__init__() From 4f16204ed798e83239bdea7bbe8c504adc49e899 Mon Sep 17 00:00:00 2001 From: taimo Date: Sun, 15 Dec 2024 11:01:41 +0900 Subject: [PATCH 15/15] Add model stacking diagram and enhance documentation --- docs/imgs/model_stacking_concept.png | Bin 0 -> 60611 bytes docs/models.md | 5 ++++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 docs/imgs/model_stacking_concept.png diff --git a/docs/imgs/model_stacking_concept.png b/docs/imgs/model_stacking_concept.png new file mode 100644 index 0000000000000000000000000000000000000000..6a0b36fd7fe473b300d7662ca884d6d96dbfb927 GIT binary patch literal 60611 zcmeGEcR1C5{6CH#$H6f>h=z3}D>E}Pj+qh4$jnHQWMuCVB@KiqTZ(8s!~QvmTGf^M1eG_x=7nC;FKF5qg@vG$<5` zURz7u5QU<^qEO`OSW0+j?Lf5}3Pq>yq^5RETTKmr%)`yW>HHZKN-O%7DV3Q~9czZA zAvXrAPSE+Khr7u{(7}q##o!NcQ{!~5Q>(m>5q;8U!N@2qR7P{RgN%`FtU&16DPNIm z6qjy!UpP5YpNb^zFu@x;jB& zL)l~GbXgSVQPsOX-cS4W%N^C=zPdQG!beD)l%~4huDrVG{FUkIAQ}~S_Daho4;9Xu zD6%R`jG?&_N?ma9v*VENt(SDCIQDqqe(vRMH122R)i>=Iijev9)aptmig03N_RJmB z**TRA6a2?Wd9U3f?(x|7Ojp+N3v~^t+>w5aH8Ni)8?{Zxid!n1>(){oR<7`6X3AYO zc}mCX5oRX(d8lbwx#dr#uJw~sehkiLZN;a#pVr^D>&Y1Wgf%+leWh2YnI{L|PKkQ| zzFH-a*TF$hmC1PF!3{#=B{}k=SNQ8Z2mI869>sCtD4%lI?1}&Jahf~zF5QK=X3F6m zk8vZjKe^i@UTW^&ah>g%9B(S)VTGyBbbFE|YkX(VL?{W&D+_rr9Xb$v^ssIXTXQWt zZSl_pe6A|{F}(iuNHg-ETK@W09dUZ+>{w+zIy7^yn0z=z_Vnm}>IKtL`=LG0vO2g| zyZvYSJDwVupRyjrRitFHaNVtESx~Ryn&bL#aXxx~TBIGv%^;5@cVX4zgscNTX~OaQ zYx4c93{s}J9`3!?rR|v$HE|*IxNA|oY{VgJYn-e?DV|GI|CW-q@aNbV)8%MWhgr28 zla0Prl|y|OQB`OfvzE7Xar8Bl%7JB5o5 z3p$;)XhW)>(C944RL;JRzXJGg0F5&m&LwC>p-gf9I{Pac8`6HfKgU zcz^GYn;BncJuB-gaHV!RB2V3(s&)8JNck{2zuikldpKI!&ZXl5FQ@qh14`2$)90T? 
zYzz|8zd8>$B$zlD{x%+I9hf`bmMoT%UKq6JmE=9o(?35w3Sy=Dy?Y<4!9I8PS`Qx& zA2PzL%?CxA1CjtJ|3=m zBI~F7&?QtQyVugfD7n2Y!=fxzAKj0zUDM>@_?_zEesOfUDpSRM^2y=mwwz{kS5*Yf z=-VT7idfE5N!yAY!tPa}JwzXZNv>jXC;xHjVIHjlRdAp5QWzdfoy|?B&UH{lm@%+s zj~$Pa7M>}lW?!Aw-uP%O9EHRV^NWf5oNwx!W;49TPKeo;ck`MSl?Rpol{tLqzNzDM zCP%!F@wC!##6CODu=nzZ9HXyCz4$Dyh-#SLWjQNdM&&{<$VvXfc9wsIeXo& zqfcb^_Ey3)x^E@K_mrZ9-qt@o_lVQ^a?|qO_UQRjQ||j3tIS`LTfOD>LyJ~9{-m8| z@#D3QknMB)mprWjn~Ti^U^(7K?1n8dN zNsUXdQyJX(EYeC{lz)mZ6K}(-%oCb*)=*e5BT?XLoJ7>KtIr~ukG?uOdN(NR-0{cx zJ*|G{WV{bPzi45gc%=MD@crCehuq<8)*QYQmiJC%m)cBT>{NU1JJvboGiEh!?0soP zXa3|^UhC9@%DZ3Bo{&Km_eptm4rh)G{Zjq>M(cE+bfuFnCwFzL zcYiN4DXaB9d}nvd)jIoO3Dbi+B-|u=V-N0qw})qManRBA+Yjw~e%)xBZDAh0^=q=_ zb@rll=6$XQnK_l3mB%+L$Xsn5arWX3w%6XuMf5zCw=e%d^C9WF;-Q6IYOP-y#oOJV z^!vP%c^ATCpJU6D|5YG zE{O(@h#~(5*%R$2v`;iV+WWZd@%$tI_Dk*F?ThEn%2dcmx)iuP>0)yEUMg3b>QdN| z?eoOP+{eTxdGdImY~a&%ht-%1kry}z_74=)c6%K2?K`ydP^+AR*J=67hgs!hEgXUIQ*N>^P%tfg%|Dyp1p%1*B-x%aVWI4na2Aq1q#od{o^+> zI(%o!|I6$SF{(oL!fxyCgh%|*e3Q55I=VikPL+Rt-4ZpFFg#mskXM*5*88G!@n+_$ z%r9$IHp8JcoAloj4rc})K+$3u1gOvMXk|ExeMA?8JI$K7$9Pxk zu8F-0vP;HT?)}22^*)MZFd9@n9GbsW7S*^zku8^#b=QeK3j#kxtyp{5!!;u{A6^!akKYqXb_GjbI!=F=p z9s32CESN{5^P`_fyTzm9+xfipIhFl426gKw>wXUA?ljanRo|GnAsBAdZZN9jttX;i zF4q!_MC&v17=)tE;)IDbz1$sfecuD1w@~8JBG{dEhlIbt}dfqLkEqAfvX`6SIh)UVK z^s@VQw@*X*TK+Xp)=jHH;s@+PWE)tF4y?e+s_8`ZnbKMlZuX zj+g%86Sm{r3)w60lA>fe?AQgJ2z!U6=A<2@ePnoLz1is%J3A-Zf(CC7JUuCrD`HaX z9a|``qWE;KrmC(wVLk6&UVwpPM#vBO^7-F)>KvbB-jM4G5uNyPi7O&R!85~iV~}Es zR%Dl=80UKQy3qjd3tf7bCoV@qeAbt>URwsfJG0oJXR7c-%J=Blp$lz`W24Sw-$LFr zz784H9J~>eN-gWwt5e?QZ|V1XRsVN-a^=ponf~8*10w@@7p13?mBoXOtv{SBT$S&Z zk5#7g>sS!@{k6OHVr^6A$IPG!iv^er-?{&RU218O@y7boqRE;q~lP zLXYyQ_&Cp~{d@i{gU^}olzQ1dvh9wWIkWcGhKx=iR%Q=s?G-J`LIOqS=Xy{7c=`@1 z+Pm^C4Ev@ti^k~W%zmIAL~YI;ET5hlW<{;~qHHPVNApqYEvPGts9!bFJl;#fvVOmf zS@$Wh1#!&_DvEKerQVfK2pL-Z#VTNOvg+4@&6;Ec^^2I`<4|B+J!7Wrpr?mA0MD@~ z4B1{31w12zKP57@f1YcS382V{@1s$uC?^z#v_>Buk$-XUhwRyUB#*z0qJmdU@aLC> z-d;_C%_84^POc80p;V02w6)>U$ll}38CTDhS@oWaY8ii?WxQ=-A+@d_TN9pnwwHA&mymEyj$US96pNOj`&(xzIoVw&*t$Ag=%4KKb{G|JiBw@6Lk~hYtOB>wiA_pIc2l&v>Z0xxh8Ol>U_$Y4d+S zByCg>LsgnjUdaxf`##D?vABU%Y(lSsficUXSP)cU3*^qr0Nx z-B+UYchRMMxNlV^kessrZo|Q|WjYB`f*&3m3uqjFwI0~MS=k=a6xdU|xcX%MSxA$4 zrsBYxCTnNKz|HoN#ZR}F8!ZAq`!7`%4r+v<$j}%p4o~>^2|f=3?#r+D@0;7p zvY2o<3+(^cfqWPsjK@!0m`#`e-_Apkk!4H&=lp+;szsx#Ic0nEng4tBtxw@1wg;LUpv|MCjTJw*G~R=;r}h#{Quid!Wq#vYd?1#VYibF|B9w%72X{j z$FuFuIj~slVZ!3AtpG-9s6G9zXKN%H!9B0T<{}|m!Q)XuA508fXjSHGF9u-D%B#O0Q4_QN@I7Gaj$zWe zQ8ZXb>gL+*QXrQbk58kFSKD%0;UVBisMbTL-%_<)6?$^0L;O^tn_GsYW8;;aq#hBx zRc8$=sg9`P;l<;F?wt-6-SRL+Kd^I(__apgiUiS$rJr--Hk_ zc8Pm`b&21fpMsWU!zs=uDot;14x3hti7D?Ky3FQ!duHj)L1n)|dN)6{;>{XngQFuE zfv&R^oC?cz2Kv6;-lpp^)L1qe@@;+}0W^;3tyClr+>lx|!-PX5?4Qx2+gjO0sm-cFYX+LF6(Pt7Eg;`K>@`&kHaqhpx5d*@s;TzKZD~8A8*~XTo0L}XarkuD<^LY+-Lv*JHpJMaun_-dtl(ku zYR_WD{h0J~?;cvu7Yw)mWSFI>v5-RBBf`CfB282 z3sS|-H;8Y#@(oB~Gf6Uhh!_9q#hgA}_n3^{8|jw}SpHs3E3U(B zy~!1G_uJf9hWRvjm!#rk99B;y^@%ZerI2dQNCY%IKxeXKC`X0~~NbMfWydd*vV z&e_{zFDMZdWl|9{Dp~9aH0|&EEZdv5&|x3zU{i5XJhZ(gZ_h}tP*8(qPc!>$1lJlq z_?H^jxgOg8k2+4^BClTk{aUR2w|Akl#huU{)6#|hdh<%kRf8oh`VT|w#ohesbgknv zHs?1Vil^3Qn+%q@ z`5e=|Pq0X}A0GaknrscZTORmknUnrQx8{e(3d7u~8QivQt{Buk%|xF|ljK-p5y!H* z@S@V>#kxM-BHyC#_~RE(Tq#0+Wk)r;zMf2iztXxW;rR{E%vGy^M$e3djrqdR``y01 zirp7#IdnNB{UgHMpIuqp=JPu<@R@PnVBc0&kz_Ha!{pX|L(X^IN3|hLCKUca}_DozE!|PTU zQ59{VSTX+Obpbzqb8SS~ZFrC>RoJ3LfWzY>_vc$tc^LucEJD}kQ|)#3LWU{l^!vvv zK^v3N?RL=>0X+VC2a7c0R??Y`(P6+k(#}kUZU)xxV5!+!hBcKn|0b4ylkL9+$p2I8DX2-WpS(Y| 
zkM>jgVObHgBD_cRIZ~DQKl;Mris9_I01?YRXPZ@vfS%b5)Abh%hrdt`*C}NY18u-~=D3at*6eCnM`ur?)31VyN&v)a?F)H=X*cFSVty z<0@uu6{dX+qWY)3>;);pL?RN7{p}0a$h1pM%4_7=7{W?&6$}-=5>#YB+hxCE* zw%@$%y4;g}k9gO3x4beokq3_#&NDr*{W_8fS#Lnjn+S_RLywWRy@n709K#~vHfZCC8~pkm*D zlYBcIVfbi{Rs^`W=*d3zvGHy*acVO$)eBwi>Me-6<#+dwy7V?c1S^2{N6&-nBxrGl zSrq}(S9oHynT9vt{6oo;z1eCfS z+)*QS@vgwf*IQ_BZ5Ja~cj9oY-(@tmMY$IkQL`h70f+yh2_=)}XLgbz5ZK}4H0uYE z+bRTr6@4rfOd-Ag0vG!?e@BJOz#Ky6-0=3re!|BCbE>@C#P0*V)NkC<30V_9MjHI?-!^Qs1hse+_pg1nApSCYn10R1Fy`-}(2n`x6_S*T>yW7f# zTr69BTmNxq5h>(2Fpv(%$ApE6>x&tylBg3fBoNF_ckSFB1D%D7{hNx05!iq>Hfhi8 zi(P<^F9c*WZA;-Mu(Gc!_w4rTzal3&m%k$4vX#G9zHQZit(@eO{yKJ2T>0zRw<6-- zSiT)z|KA$Ru?ME#i-jCKd*)z$+0Uy&1*)YTr>^a$pm==h?L`U|AtTl;C;G(~vXNfX zuvyaNOOs|WY&x8ZbGLsQIaXNL7CIQ(gqOk`U8K(06UgdL`c^HHQp$dcu`u_IwdHf$x>B)g_JCc--lFQhm|c4Igl^$-IlDw8zGZQ|GIjL zbnZ?#S1|pmd0v>fgN-q%2SW#I$Z2^<$yHS%BwK>!=hJfGz?5SQq~c2+`ba*SQ`bm5 z8^WIZUI!e~(bczukF#t6CyvA67=*8HtC>Sv3dB9({i?~1K~8SZuk zDE-Qb7*ba_-~bK=>d2&uVwY812n^fq*}wo~v?p*%&md$bp+%F;B{5_5$4IWri*A|6 z6)R(klUCPTSaBbIMI0bm;29`jlU?k0Y$s?;SN7sI`geqCajkaLHb4Jn{rZ#sP?7uB zyBbar^fG3{2YIIsjXlU|hEBXqdUaRt#zM(^R?Eo49AtJclLScSetSPyIp(Jb=}_Be zgqpDFjnyfGz{%&PW%KQp{Khzc} zaVy=O>RSccQMeaRAN?n3+nJc-PZ9hUzB}e2jds2KWtz8(FkEpepKsR<-CXT`*cD@- zygui!iFCtZ!t|&F+^I|#AH#1LHOxiwD{w!CK@_uW)5=nR6qml|@TWTqBQf&UghMVm z+ruqP5_l>rz8f4{CXWi=WWK_R8x|MaEVW&?@cb-o#6ZrkoHp2!x#6|)LAK&JSCYQs zd~3ni+p?S`Lfa6 z(Rf`^ZDiV0HV|{T!1A{5_cO^QFi{g>L2V3)h2~e+1qsc$L8GH&re4TYWf%sFD|4OS zDr6KFp2oVItB=qXE0-GToO%3ixp;O;(DhjzpVK>f*`s2i%MB09W*koj{dqt7x_A9I z({tr<^Cq3yktPG^`7bbKejj2p#rfnTG)D*J{iIO zc}d~tDdsLk7rbXzZ!cUke^n90c3~sE<^>EhbtjrU`9h9lm_}WN8DVBiYVX?cN=|_g zJxp;&@&H(0-A{GlT`(0`odbc%!9Y{lIi~5wapg%T1Krih7dGBXpJQUE(A6<)ed8po zk*>i_bp}#Bup){R!7D5eCM3U$`Qy_=?1UPopZa>_kKOD81Vr_-rTq^2!(b0Z=4q|5WR=1aS*Z!7(Y zHfT+4foaN#Zo{;9<*`A7JjlG{!Ga5P8{007r2AUS_eU}pxGc-ED9_$$x>u_(e6X$U z>fy-}qggpQ_TcXg)+fpr*&^w$Z4c^XEee zjFV+*L%}T_b@O}2ma?b}Q|uNhrRnhD^}8C^3@f_*JP1qcQ=!VA(+|Ip7u~$v51y<0 zlN>ZEjW$H8g2vr9ZoFHoR~f8{Dp;G#Q`YnDUhWrIohUXaTVLpE0W0`6w#2VEs@-OC zC3NW>R{=~u+h0_exHN4FTfG!7A2TT|K&E@F+w)t3Gq_V{d1jIXDzNIbSbpWVOLwn{ z7Gw|<(nDY%$1?hkYs201mnn2r6S+JLD}R?S_9TXm6ozoPSCO@;DTAiKJn1)+Dc#1X zL1;lt{zxlXe;_n?z2@6YO;cZdcS!a6$Q{)hB|JOj{q1D(%&kb!tA+q-Y%;I?;cY*g z4(O$)nv>9r-O|?AL9boc>rID;gy#4vXIdh}UiMvd@C=c^z|glb61tK1J*n_h;7{hv zjyFv^#PbUq8`ygP#Q5HWJ0k3(g|M>;F|(7E^UmgE{cxj!_4*S#C0Cfe>%#QXP4P^9 z<#N)%+Vm}UL;axgJ_<3dWjp?PU4ohJk}jzWcAEoIwuwV4JGW9TTRB$nc={E3ODu(c z8Y>;ON~OkB`f@{wiOz*K%XWNV(zQ$~-8H?l4Yhg|Cl4vt`8|AGdX5D*EJTS&TI*3*aAhtZlbLZ%YFSkB+b4 zNhJ8Q00|Bw(fyVonT2zfMYbvyc9&RYy1bzCYhpn4q-Sk0b43<4^=c#|(QWauO@iCH z;`&^R(MDlN+Y4HQH}Cz*OmP{0Kch!~cs4y&I-uNrsCREVmtVY16lcKM0BrIABH@;5R%DtF20cP$fkh?yMyHK+M+Hkj|+aVv&hDg=-LCed#0-6C<(a- z6#FP6WxKtMyvN;6+Ha&5mUX$mqi;dH!|>RffK6@w^u(FRWy`nRc5IB9R$5FIJn0Gi zdd)iVN#({0ud8+l+z65pqqOpC?4=BuT}AqG`c#nPtRf6ot?TXjAP9FwM);3Bt>sLW z={jFr0?IFIVA^OP)3`&lPC9s0*G6YKX#5-F5%!xilg-Q0e*oPzkG?yZ?r-W3JZz2f z-7qC|_|xtrHD<3v9vp*uM!J&%k2{F}bh56J>vpDu!q~0QZ*CBYoETn%*0gl>ER>G< z3^)wjQ>pkV)_eEOLZ`E7U?(Qh|0l$+uVaEW2}%|IsS$M$x1N;ET$pLOlN!NCKl9E2 z+)&=R@xdZVH-(L*ftEM^vwWY&em@Q?Ivm(bLhYyllp2csM@SA^krgtJ;iN&K@fnkl z`Ke{S^DRjDo^g307n!=YQmedi{N$^}?x_&pnI{LV8Y(wmz|>oIwc^hWw|ne;L#7pm zVm3F;DqSnr7fNOu4}WuO*;O%p3Aw?<;8ch}Nik?8d{re4k7?h6z%YGo{zWDh_?VBgx3z$#d{@;cs4pdDCGFu`F6?HzNzyzBZysE|dtl36@Nv*@KPE{dtU)ldZAsix zfB-%iQklO?C*3_I?g^&<>GP-Ekb+RLYLXn|QE=FwDjr-V#dydo=~}O&bs2H29a6l+ ziZ(n0ZVU=+ch4iau}@Z@R-TfQlt6m;CB`j(i!*{*MX^``sjfxC7INvFDnra)co3lf z!t;Nw_CU90Sn@M|Iuz|W(x?YFSXZf#q7D`+6uR%M8YGjuiSZ!(dVW4UHgs*kY2b#e zK7@+T&vraJLO4rL>b2T|RoYqRklHw5ggxkokDlq2!5vu5>x;cMnr{n8l_eHurb-~Z 
z#6-z%*&&#pB%>L4q87wM>3xTLU0NWhNhOTNY|n&oSa1MzUogC+YhiR)KAP_6@oZ8w zSzh*Cu+lI)$KOU8u+5o6lV#-hVc#wH5LUGt&0NU&NcqG6GAs6gCeF~D&V3RIFe^qD z(R#AjiS&zY=TXQ6RIc1mkn|dVy~@~hNBjF%SBsLSlS%dBvCH7Gt}ErolV0O=YzrPz zjiXO${3rVnP=hX(W~Q$3s z8}6%W)|2f{%$R@VXhPBBH*h1LVaG|0-47iLw#Sxq(57N&c^XS1gkVb@&W;*3WuP4Y zRufj#lN8$p0RlnR*llK;dIJ6dc+a!;iQD%i324|BgcT)rwr!J&kBblc^3Sjzw_6uH zAZzB9*AGL5H-gmDUl7Q)aeM}^&nz0uZ;ST7Us@ASf@mPDl(3Z{lcCe0J#g499KGE% zL)rxmzca_SKS!%Nlkmb5Y=yytn+5T0&#vVm!1(l_DYg}&o)vm&-O6p-pa18!`mic9 zaMw?lj&2W+z2KJCi8mB9r0NKpC<4Rf*P#WZo+reJ$0UgZgHJ(G?El=^&c=R01o%(7 zgA=d>$@**>8;~gdU%nz?kK9)JQ}PZ;B41vCs?4$qY%_JN6kbnkN@|@Xy=K+3-^V zsmN~ufEoXd$NzT;R}G8VOLw~;KaGhu-`imt4-H%|NGf0G6gsL$gXEL&bxZgvVSI8lxPv0;pCw~yhHFn!%ZEUKq`>q+x5x)~Ttwqzi3%(@DrF|w-FAFj}JM(oz zk?IN(<_WK8@wd{QtDP!Ob%C8<&WhY)ND^Mi=D zD{ik14ifKsGtGcdqE zQIQCTy{AT2zj(=fOdMAK(~Sss!mf$IuUBkt-g`DEy#13E6Y11HjO|y$+*T`e_jT@! zHaxCnp>-wx!Gn((IydjrvoLj*#GTDM+k4N@SZ6=)eGZoUhTna;HaCBa%%}M}r1+=$ zJ02dnGrksL;~3ajY_%Aqa5Cdea>0|EJ2}r__Ef66wACue@D~MOQfw{h0YUKQBLMk_k z47(JKy7)j_ftsSs2;E)jwCP*8l@~fffxD!H$6TfXumRw#rdMB1h*9Arb?!%>P zHMoy7Xl(T(tmKVI!Uh(Povpo5h=&7Dvyt&oewOR%GzcV~e*?~UW6Y_*X(___V|$Fy zm>^v^zw68K3&itziRV9er-=Y=P~k7B@z?>)4^+6K4A{AXYaw6LoE8T{en-3{ zCJ4~{wTf#-5y|H!l7IggO*oPKAZ7w?n2m!=waNmGRdrU=eTR=b1UrAy)sYd&#}UcT z{#?R{ke`W=&#pMe5Z;Od@-GX$Qmq19RfVdxcn~!2rG+s@45!VY1%Y-DAJe`t{820y zms*t$vg(~q4nB?@iima2+*5?q@`I98EaamGY!3?=y`x3G~1Y?^PzbN@%Z z?tu`*z0cR}L7>;$g1%GJk_f#PKyPpC#EfC$0RXRa*{D^W18g3?(;YxQ-+zOMP097T zE$3pW3fM3`I!!}n0}8q#A7spl&PSjZvGG7eZ%b7B?@2yi8Hl_9ubq3%J1N4~5Pi<% zuu-iFK%iGUN{&Fe{}S=|#G`c>BJ_p`^b#ef>BxFv=ZcJ%F*`b+AC6bF*Fl_)EjdxL zmu0?EBFD?Xp`Gj}J1N80K(fQJY&I%*A?%k6AFqnHXdw%ziE7N;7)OUNL%`q1{2IyI zDRxJ#DjBhDI<{Mv2?4M{Ylou_f;K2RIKkrHF12-{r1ir>Dkd3ogK_YKwHL)h9D>Ne z8xm@{omKVJi1e!?s-Q0*sf4&tRCpqDR%WJ0%}x|qFA%gMdfIq5dXxt?1V0iaDoZBy z?t^C=LtJUSiB5$c=y6m_bmGDM1o*GV?#kzmGr{~iblT*llQ42%5Rrk|bVe>!#Bd1~ z*e^{6v<@`kVTGWZ!7QQ&Xv622vIgR;dlD&u!>U4}cq>aI0@M&M#fqMrE#_@5VMad% ztUfeeO~ieaM|-h96CIS=jT~5kMF?81kBr?y$OS=&9tjSz2tw2>ESCr?)PRt(rw*dO zeMg80adi>W5dcmzldUX&-|5mx6W##?*$=26z@FwMlV>pBJhynJ%S-_7tzr^iPfef#uX=i*f9A~5QokH#hDAb@&J#KI1(14 zHr!z(fsmj~w6Aqz-!U45J~rwvO-&yjv^Zb~^+5S#xq4Lb4}sy2W792uhz=bjiokv& zE^;fnG6JNM7t>X_`fuLzgDXHej%?7cO@D;{cZ7^ zkbHbZ69WmW)Ez!zC=7cKk;;q6zTnmdTQ|7EqC+(^_Vi)!#HE|)v&xtmJox$@hu&O-P;ix)?U>xi zdrAx?wj2Oa`=vxm6}%k?Ba$(m3^Tz?sD0bh(JTSiS0Zu{WRtL$#DNIH0n3hacRaM5=C){{+BV1Sc0t>$ZSEinj8qyC7y47HQWISbYWo# zK$c8eDP-aMk))Q9&s?QS29}?{CV(WbGr9;sjc!UoBoR-efnn<_Xz#GF7mzJ%v0{K| zQNH`JJ#c{tB=U^Vow#%tNiIP1sF8$9TvZ}0%o_;W!pa|HwjasjQRodmq-t7yE#1mV zgy>m15^OgqzJy>WKuurDGH1Xcn{b6H17g|4fH1sAl*FwvoIW-3>$orW9#k{p(pGgW z=o7fC0b7mTP8i6oF+>3{J=;%9jGSSF50HK}?GB*19Oek2P?2r#0m|@C%#eO9?>+_q zRk0Bxr(5e)3!>Ek1V=ys8!!K`V9q0DqFh-CJ(?QH*YE@-$X*^1t3JyX@x5Ecy16zi zKXEx-Qw?tb8lmuKnhE_~4`RPhZDu=Q{)&jt4_PbV9pPaOj<47}rFnxp4zl)YV`jck z?9y!%2Nf4pYyqs-Ce~&A(w-8;q8QSzI{=UrdEO!HQW&IPR4R9`;y&&~(yx66uRx;T zh`ECGL#}xw9R?ZHfJTHUlxWe0PvD^q5Ob(S2RnETtTM=MfCo5MMIb7^q<2r7h-sV# zsD$J3BBukGQ~M#KW!zbZkGlZ^J8j?UBnin*G-3kmg7i&96GNdxxCzR=VmI$*;Iz2F zW>(CMm@q+*u0>3#YF^?-h3_P0YevR&-$^RntO^8Kh#O%sK^jm$KXgBCj=h%*1kKIl zN&!Kyi6WS4PfQ_Uw4fIjvQe=I?wkCMR2MFcdF8k(5wPKq?M*27ic%1Y&U@QCu@f0K zGXO~ExpTvDS5iSITu%p~!Y7cNKk$^EE)evFXaoLv!y;QoU)2R12Od^xWkBCS5W;xZ zM}`@IADY(AdcX%t|Um#!_#9+-KK`Uh&N5JWxPbZ@UMaL=r{fFab{uHm@C~ z#fmGs1>{)r=)Q(@){iKFKOqJITSgg{3`V(VDD{9A^F{?U;>Mw4v{=7*g#OEybb!Y_ zc!Yk(ESjCvq|me%uAr@^t@)oO#{Nvuh(SqSNU5tbi7FIYdeMUD=1>?9xWWhw*0GUL zL|ta?2WR6Balrk!JIL#_95H`yxbP*2NYJz#oUXJ3-xfun0VijKx+TmFWwAU%sd~H* zxP@pYq_x8QId+QVN-gg;?UkPY1fCNt+?^?(JVpaLeqUrpcz_;wKGt9Xo;n4JT7 
zA7$YR%bK-OxS}M$zw=A8ICj520J_y2DvBU#^pS^WDTwHw7J}v9_xg9zhyMaVzkZ*l zM4KZ8Mf)ZO1x&3SK@@_L6!of_K%ZCT0$MrH`4BUX(wll*3S$BFF=Nz(Zy#d!6RVH3 zNZ~p(QQPow2SGh0FK1lj?gRBvZMuB)_Ot zU4tmdQo#URVnkgc2p3_)er#M5VB$DkH<1J% z#OZeb$(slL7bW7qNN>7zJQ=wA%&E%T$L+EAQeb^sWE~e;i4USA#K|#OZ%9-i?ayhS z5Z#1pIPrl1K354cpq0Ra=Tzbz?88WC06@7PjYOg3l_vVQZ(~P-NgMv)K%aMrG^^qd zgM^ED8cUea%lp7b*mNvGs!|k36o4JG6VsLe7(oE$JI|-kgzpB1?J*h>*wZ|q5#M)v zcS6;wz7J8*BRcQ4S~y{0Hi$;+-h050X+8oLtkj-}frbKv$=7km?I28Q5W~Pu7PV3m zq9s63ChMo2Xfhk5?{L7%m=!$=i2--{8$o&y0TH6_uchxhkTwKEajKw1M%d&>+oHy= zB@zRrn|lA?7(ezj*qC_nnji-{15u)u{%j9WqP;{VauQCtOVSB#r0+1QRfj`YAbp2m zEc|M^A<}ms_ImMsVnijXJZIoSGW%?>v#6Oq1~d+*f}0TnrtU&wfMcN-11BfUL-bkZ zvreFZprvL=-hPJcz8p>q4PlZAA5Vcfk7R!BACV>}-kKlg}hoKdX&e#td66~@KfqqJ&k?nba*4R!J;R+$m$Ks)(jiicviBcGJ9nz;*K3WHu zPxBM4IP-BazoZq)dwsY0K;nuD={63no*(XV*CjG6M*b zebM5_sKG~*6Mgg!sUd=GFNR%ZY0-C8UyBB?55+LZTjL^^)oQ^5g>53XYxC*BL5v|% zD4nYw+(E_#bo34M9rqr`?pE>Bi5_czegl&J7EHzRsS4~x@fN!*h#h#ylSc9wWi(W( z_yJNS>O_$?D56p zfgs}-KraLQjrL;JAnW)QC17rR1e2}t_MTt{v^M9gwBjwep=*C3eh)?! zT!NarJ2}KoGT6k5fszid540W%zo~E#9&48VNMmO9fn8sV+3e-Uu5+nb^qmAjOntbF zq;>xWCeqx4tDQsT*SoCaXL>=|SNM#V3j{8T6P@uv;O9eRv^q#e13+b^NXRANwc(jI zT3hBR^5KVQW86n%W)<>ad4vs<9ePSbZynt3FJcHG;Qt}$JVfV`g9V(f%oo4Bdz*cZ zWaqQz0eWIkQiWrw`v+i1Q5A&t=Q zKXL+Jfv2=}!Rr5Qp)vzKG8$dvE*iK1Jl6Q_)(~i1icaOMZ;;bT2RKZ!A+s6s(K!%7 zZl)ZsYSlPg(>gZp%)A=Tg7sL+fFK-faEWL1mI{uKLvW9371)K>R?0#R$0uP@98~cP z2<)~ur7j?_?};bJ+yzWDdF*RFG1(j!?y>xvlHlWDXj0w!l-Civv^C9|bt-3!RHA2Z zTc7}y6RS>~(%FZdy%DnmNl&3yi+JK0J=^NE=gS*OJB*IlexEs`Kg=RzeDoyuV453R zw)H^zl-n{6y^OeUF}aF{e}-zOK={|G6Wo}&vB7Ne2Y!c#LpzPxpW$5_xjNgpo?`@Df(_^?>fU1TxVw-$-nVM^Bb^yA=BB`k;`Tmj5tHq8F55GI>KAk)f7{ea(OZ-Rv-~l%1;lCKM#|^@; z$YI&2tQ)9A2;fC(lvfIMB1*7p$?vAT3Or)SX5^p~0#YWAp(xy9#}CO}i?g#&u>u_y zIQ0}5v4XqG&dtKYQH)vB0!YbEEGb~hv(5(paoAWSZq0d<4`T~Q#w!$00K2v-03 zv(Nh?JW`_vJ+XsO4cw+;r?uuLvNH{oW;?2>y%z>LO7K^txLshSlbW6iBQVTE=s!LN zk4CjgX-NEAl|_VDQ6gFli|z~=jQW6`5ecvYDhFrwzgU9R0|Odk7$Afh^Kk_CF# zaftPeDv_OkvSD*%ys3h7QtgBtEtgX+?gNH3!>V&y!PIhNMC8=q^F=pAU;ks;lSC~7 z0$u?Dbwk>&e-LL?d##lk@gGR097hG5bGi%rT_Upc7NInQo}Q%`sq8P}nEWg+8-s8X`Lvw_6|%;KZ{LjdeoKXFEr86&T)4MiXm@B(L(NgLH`MSCaJq4r1%V zp*k<1x+W4Zi13rl|LQ%g-pwUu$PXjQ+emwazzGcbS3T`xA+m#Mf`tLlru>8kXMkvI zGg8mXwC>y)4#1;#a^C-E&Izg?O8(aX{~92%9Q|v6e+>|BD)}1$|3<*S5%B-J6jY41 zTAtpbAv1KHg?VTsBZ$!H{Y!9jW7X~GsPtF4GdlHesW|pGm_CYLPDQ=~=Ah5HAAT}j z>r=0vVE>@g)W?Q*{PiO?LA&xKI?nM5(6d9RA`L%RgB%K9IR|vNTP%`5d0=zr~7FUp3DR4m=*Lw@7~3^iqqIc(P$g1pqzF1|WkUe%G~q3t<%F6{x&dy4*Z! 