
Merge pull request #750 from AIStream-Peelout/fix_circleci
Fix circleci
isaacmg committed May 15, 2024
2 parents 47d2b43 + 4829510 commit 75bc3c8
Showing 17 changed files with 105 additions and 48 deletions.
86 changes: 71 additions & 15 deletions .circleci/config.yml
@@ -6,7 +6,7 @@ orbs:
executors:
python-executor:
docker:
- image: cimg/python:3.10.3
- image: cimg/python:3.11
working_directory: ~/repo


@@ -254,11 +254,13 @@ jobs:
command: |
echo -e 'GRU Vanilla test'
coverage run flood_forecast/trainer.py -p tests/gru_vanilla.json
echo -e 'Running classification test'
coverage run flood_forecast/trainer.py -p tests/classification_test.json
echo -e 'Running informer single test'
coverage run flood_forecast/trainer.py -p tests/test_inf_single.json
echo -e 'test informer single target'
echo -e 'test multi informer'
echo -e 'Running transformer_gaussian test'
coverage run flood_forecast/trainer.py -p tests/transformer_gaussian.json
echo -e 'Running multi_decoder_test.json'
coverage run flood_forecast/trainer.py -p tests/multi_decoder_test.json
echo -e 'training multi-task-decoder'
coverage run flood_forecast/trainer.py -p tests/multitask_decoder.json
@@ -274,14 +276,7 @@ jobs:
coverage run flood_forecast/trainer.py -p tests/probabilistic_linear_regression_test.json
echo -e 'running meta-data fusion'
coverage run flood_forecast/trainer.py -p tests/meta_data_test.json
echo -e 'running da_rnn_test \n'
coverage run flood_forecast/trainer.py -p tests/da_rnn.json
echo -e 'running auto test \n'
coverage run flood_forecast/meta_train.py -p tests/auto_encoder.json
echo -e 'running trainer_lstm_test \n '
coverage run flood_forecast/trainer.py -p tests/lstm_test.json
echo -e 'running trainer_multi_test \n'
coverage run flood_forecast/trainer.py -p tests/multi_test.json
- store_test_results:
path: test-results

@@ -319,23 +314,32 @@ jobs:
name: Trainer1 tests
when: always
command: |
echo -e 'running test_informer \n'
coverage run flood_forecast/trainer.py -p tests/test_informer.json
echo -e 'running test_iTransformer \n'
coverage run flood_forecast/trainer.py -p tests/test_iTransformer.json
echo -e 'running tsmixer_test \n'
coverage run flood_forecast/trainer.py -p tests/tsmixer_test.json
echo -e 'running transformer_b_series.json \n'
coverage run flood_forecast/trainer.py -p tests/transformer_b_series.json
echo -e 'running cross_former.json \n'
coverage run flood_forecast/trainer.py -p tests/cross_former.json
echo -e 'running nlinear.json \n'
coverage run flood_forecast/trainer.py -p tests/nlinear.json
echo -e 'running dsanet_3\n'
coverage run flood_forecast/trainer.py -p tests/dsanet_3.json
echo -e 'running variable_autoencoderl.json\n'
coverage run flood_forecast/trainer.py -p tests/variable_autoencoderl.json
echo -e 'running dlinear\n'
coverage run flood_forecast/trainer.py -p tests/dlinear.json
echo -e 'running custom_encode.json\n'
coverage run flood_forecast/trainer.py -p tests/custom_encode.json
echo -e 'running multi_decoder_test \n'
coverage run flood_forecast/trainer.py -p tests/multi_decoder_test.json
echo -e 'test_dual \n'
coverage run flood_forecast/trainer.py -p tests/test_dual.json
echo -e 'running dsanet \n'
coverage run flood_forecast/trainer.py -p tests/dsanet.json
echo -e 'running trainer_decoder_test \n'
coverage run flood_forecast/trainer.py -p tests/decoder_test.json
echo -e 'running trainer_full_transformer_test'
coverage run flood_forecast/trainer.py -p tests/full_transformer.json
- store_test_results:
path: test-results
@@ -349,6 +353,54 @@ jobs:
when: always
command: bash <(curl -s https://codecov.io/bash) -cF python

trainer_test2:
<<: *defaults
steps:
- attach_workspace:
at: ~/repo
- restore_cache: # ensure this step occurs *before* installing dependencies
key: requirements-v2-{{ .Branch }}-{{ checksum "requirements.txt" }}

- run:
name: install dependencies
command: |
pip install pipenv
pip install --force-reinstall --upgrade --quiet -r requirements.txt
pip install coverage
python setup.py develop --no-deps
- save_cache:
key: requirements-v2-{{ .Branch }}-{{ checksum "requirements.txt" }}
paths:
- ~/.local

- run:
name: Trainer tests 2
when: always
command: |
echo -e 'running trainer_lstm_test \n '
coverage run flood_forecast/trainer.py -p tests/lstm_test.json
echo -e 'running trainer_multi_test \n'
coverage run flood_forecast/trainer.py -p tests/multi_test.json
echo -e 'running trainer_full_transformer_test \n'
coverage run flood_forecast/trainer.py -p tests/full_transformer.json
echo -e 'running da_rnn_test \n'
coverage run flood_forecast/trainer.py -p tests/da_rnn.json
echo -e 'running auto_encoder \n'
coverage run flood_forecast/meta_train.py -p tests/auto_encoder.json
echo -e 'running trainer_decoder_test \n'
coverage run flood_forecast/trainer.py -p tests/decoder_test.json
- store_test_results:
path: test-results

- store_artifacts:
path: test-results
destination: test-results-trainer

- run:
name: upload results
when: always
command: bash <(curl -s https://codecov.io/bash) -cF python

plot_test:
<<: *defaults
steps:
@@ -420,3 +472,7 @@ workflows:
- trainer_test1:
requires:
- setup_and_install
- trainer_test2:
requires:
- setup_and_install
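For reference, the new trainer_test2 job amounts to running a list of training configs under coverage. A minimal local sketch of that step in Python (not part of this commit; it assumes coverage and the repo requirements are already installed, and the config list simply mirrors the step above):

import subprocess

# Run each training config under coverage, mirroring the "Trainer tests 2" step.
configs = [
    "tests/lstm_test.json",
    "tests/multi_test.json",
    "tests/full_transformer.json",
    "tests/da_rnn.json",
    "tests/decoder_test.json",
]
for cfg in configs:
    print(f"running {cfg}")
    subprocess.run(
        ["coverage", "run", "flood_forecast/trainer.py", "-p", cfg],
        check=True,  # stop on the first failing config
    )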

2 changes: 1 addition & 1 deletion docs/source/basic_ae.rst
@@ -4,4 +4,4 @@ Simple AE
.. automodule:: flood_forecast.meta_models.basic_ae
:members:

A simple auto-encoder
A simple auto-encoder model.
4 changes: 3 additions & 1 deletion docs/source/basic_utils.rst
@@ -1,5 +1,7 @@
Basic GCP Utils
Basic Google Cloud Platform Utilities
================

Flow Forecast natively integrates with Google Cloud Platform.

.. automodule:: flood_forecast.gcp_integration.basic_utils
:members:
4 changes: 4 additions & 0 deletions docs/source/crossformer.rst
@@ -0,0 +1,4 @@
Crossformer
=========================
.. automodule:: flood_forecast.transformer_xl.crossformer
:members:
3 changes: 1 addition & 2 deletions docs/source/custom_opt.rst
@@ -1,6 +1,5 @@
Custom Optimizations
Custom Optimizers and more
====================

.. automodule:: flood_forecast.custom.custom_opt
:members:
sss
5 changes: 5 additions & 0 deletions docs/source/d_linear.rst
@@ -0,0 +1,5 @@
D and N Linear
==================

.. automodule:: flood_forecast.basic.d_n_linear
:members:
1 change: 1 addition & 0 deletions docs/source/index.rst
@@ -64,6 +64,7 @@ of datasets. This documentation describes the internal Python code that makes up
:caption: TransformerXL:

dummy_torch
itransformer
lower_upper_config
multi_head_base
transformer_basic
5 changes: 5 additions & 0 deletions docs/source/itransformer.rst
@@ -0,0 +1,5 @@
I-Transformer Model.
==================

.. automodule:: flood_forecast.transformer_xl.itransformer
:members:
3 changes: 0 additions & 3 deletions flood_forecast/evaluator.py
@@ -526,7 +526,6 @@ def generate_predictions(
history_dim = history
else:
history_dim = history.unsqueeze(0).to(model.device)
print("Add debugging crap below")
if decoder_params is None:
end_tensor = generate_predictions_non_decoded(
model, df, test_data, history_dim, forecast_length, hours_to_forecast,
@@ -615,8 +614,6 @@ def generate_predictions_non_decoded(
end_tensor = torch.cat(all_tensor, axis=0).to("cpu").detach()[:-remainder]
else:
end_tensor = torch.cat(all_tensor, axis=0).to("cpu").detach()

print(end_tensor.shape) # Dimension now is (n_time_steps_to_forecast_steps)!! i.e [16]
return end_tensor


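The surviving logic in generate_predictions_non_decoded concatenates per-pass outputs and trims the overshoot when the requested horizon is not a multiple of the per-pass forecast length. A self-contained sketch of that trim (tensor sizes and the remainder computation are illustrative assumptions, not code from this file):

import torch

# Three 16-step prediction chunks, but suppose only 40 steps were requested.
all_tensor = [torch.randn(16) for _ in range(3)]
hours_to_forecast = 40
remainder = sum(t.shape[0] for t in all_tensor) - hours_to_forecast  # 8 extra steps

if remainder > 0:
    end_tensor = torch.cat(all_tensor, dim=0)[:-remainder]
else:
    end_tensor = torch.cat(all_tensor, dim=0)
print(end_tensor.shape)  # torch.Size([40])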
2 changes: 1 addition & 1 deletion flood_forecast/model_dict_function.py
@@ -27,7 +27,7 @@


"""
Utility dictionaries to map a string to a c class
Utility dictionaries to map a string to a class
"""
pytorch_model_dict = {
"MultiAttnHeadSimple": MultiAttnHeadSimple,
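These dictionaries let a JSON config select a model class by name. A minimal sketch of the lookup pattern (the parameter keys below are hypothetical; real values live in the "model_params" block of a config such as tests/lstm_test.json):

from flood_forecast.model_dict_function import pytorch_model_dict

model_params = {"number_time_series": 3, "seq_len": 10}  # assumed keys
model_cls = pytorch_model_dict["MultiAttnHeadSimple"]
model = model_cls(**model_params)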
16 changes: 5 additions & 11 deletions flood_forecast/preprocessing/pytorch_loaders.py
@@ -197,7 +197,8 @@ def __init__(self, series_id_col: str, main_params: dict, return_method: str, re
print(new_df.columns)
self.listed_vals = df_list
self.__make_unique_dict__()
self.__validate_data__in_df()
if return_all:
self.__validate_data__in_df()
print(self.unique_dict)
print("unique dict")

@@ -644,7 +645,7 @@ def get_item_auto_encoder(self, idx):
return the_seq.float(), the_seq.float()

def pad_input_data(self, sequence: int):
"""Pads a sequence to a specified length
"""Pads a sequence to a specified length.
"""
if self.pad_length > sequence.shape[0]:
pad_dim = self.pad_length - sequence.shape[0]
@@ -657,13 +658,6 @@ def __getitem__(self, idx: int):
return tasks[self.task](idx)


class CSVResultsHolder(object):
def __init__(self, historical_rows, all_rows_orig, targ_idx) -> None:
self.historical_rows = historical_rows
self.all_rows_orig = all_rows_orig
self.targ_idx = targ_idx


class SeriesIDTestLoader(CSVSeriesIDLoader):
def __init__(self, series_id_col: str, main_params: dict, return_method: str, forecast_total=336, return_all=True):
"""_summary_
@@ -672,11 +666,11 @@ def __init__(self, series_id_col: str, main_params: dict, return_method: str, fo
:type series_id_col: str
:param main_params: The core params used to instantiate the CSVSeriesIDLoader
:type main_params: dict
:param return_method: _description_D
:param return_method: The method of return
:type return_method: str
:param return_all: _description_, defaults to True
:type return_all: bool, optional
:param forecast_total: _description_, defaults to 336
:param forecast_total: The total length to forecast, defaults to 336
:type forecast_total: int, optional
"""
super().__init__(series_id_col, main_params, return_method, return_all)
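For context, a hypothetical construction of the test loader documented above (every key in main_params is an assumption for illustration; the real schema comes from the CSVSeriesIDLoader configuration and is not shown in this diff):

from flood_forecast.preprocessing.pytorch_loaders import SeriesIDTestLoader

loader = SeriesIDTestLoader(
    series_id_col="station_id",  # column identifying each series
    main_params={                # assumed CSVSeriesIDLoader params
        "file_path": "data/flow.csv",
        "forecast_history": 36,
        "forecast_length": 12,
        "target_col": ["cfs"],
        "relevant_cols": ["cfs", "precip", "temp"],
    },
    return_method="all_rows",    # the "method of return" per the docstring
    forecast_total=336,          # total length to forecast
)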
2 changes: 1 addition & 1 deletion flood_forecast/preprocessing/temporal_feats.py
@@ -14,7 +14,7 @@ def create_feature(key: str, value: str, df: pd.DataFrame, dt_column: str):
:type df: pd.DataFrame
:param dt_column: The name of the datetime column
:type dt_column: str
:return: The dataframe with the newly added column.
:return: The dataframe with the newly added columns.
:rtype: pd.DataFrame
"""
if key == "day_of_week":
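As the corrected docstring notes, a single key can yield several new columns (e.g., a cyclical encoding would add a sine/cosine pair). A hypothetical usage sketch (the "numerical" value string and column names are assumptions, not taken from this diff):

import pandas as pd
from flood_forecast.preprocessing.temporal_feats import create_feature

df = pd.DataFrame({"datetime": pd.date_range("2024-01-01", periods=48, freq="h")})
df = create_feature("day_of_week", "numerical", df, "datetime")
print(df.columns)  # the original column plus the new feature column(s)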
1 change: 1 addition & 0 deletions flood_forecast/transformer_xl/basis_former.py
@@ -0,0 +1 @@
# TO-DO implement basis former
10 changes: 5 additions & 5 deletions flood_forecast/transformer_xl/cross_former.py
@@ -33,21 +33,21 @@ def __init__(
:type forecast_length: int
:param seg_len: Parameter specific to Crossformer, forecast_history must be divisible by seg_len
:type seg_len: int
:param win_size: _description_, defaults to 4
:param win_size: The window size for the segment merge mechanism, defaults to 4 (original paper used 2)
:type win_size: int, optional
:param factor: _description_, defaults to 10
:type factor: int, optional
:param d_model: _description_, defaults to 512
:type d_model: int, optional
:param d_ff: _description_, defaults to 1024
:type d_ff: int, optional
:param n_heads: _description_, defaults to 8
:param n_heads: The number of heads in the multi-head attention mechanism, defaults to 8
:type n_heads: int, optional
:param e_layers: _description_, defaults to 3
:param e_layers: The number of encoder layers, defaults to 3
:type e_layers: int, optional
:param dropout: _description_, defaults to 0.0
:param dropout: The amount of dropout to use when training the model, defaults to 0.0
:type dropout: float, optional
:param baseline: _description_, defaults to False
:param baseline: A boolean of whether to use mean of the past time series , defaults to False
:type baseline: bool, optional
:param device: _description_, defaults to torch.device("cuda:0")
:type device: str, optional
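For orientation, a hypothetical instantiation using the documented parameters (the class name Crossformer and the leading arguments before seg_len are assumptions inferred from the surrounding docstring, not shown in this diff):

import torch
from flood_forecast.transformer_xl.cross_former import Crossformer  # name assumed

model = Crossformer(
    n_time_series=5,        # assumed leading argument
    forecast_history=96,    # must be divisible by seg_len per the docstring
    forecast_length=24,
    seg_len=12,
    win_size=4,
    n_heads=8,
    e_layers=3,
    dropout=0.0,
    baseline=False,
    device=torch.device("cpu"),
)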
3 changes: 0 additions & 3 deletions flood_forecast/transformer_xl/informer.py
@@ -124,10 +124,7 @@ def forward(self, x_enc: torch.Tensor, x_mark_enc, x_dec, x_mark_dec,
"""
enc_out = self.enc_embedding(x_enc, x_mark_enc)
enc_out, _ = self.encoder(enc_out, attn_mask=enc_self_mask)
print(type(enc_out))
print("thing above")
dec_out = self.dec_embedding(x_dec, x_mark_dec)
print(type(dec_out))
dec_out = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)
dec_out = self.projection(dec_out)
# dec_out = self.end_conv1(dec_out)
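The removed statements were plain print debugging. If that output is ever needed again, a logging-based sketch (not part of this commit) keeps it switchable without polluting stdout:

import logging

logger = logging.getLogger(__name__)

# Inside forward(), instead of print(type(enc_out)):
logger.debug("encoder output type: %s", type(enc_out))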
4 changes: 0 additions & 4 deletions flood_forecast/transformer_xl/transformer_bottleneck.py
@@ -268,10 +268,6 @@ def forward(self, series_id: int, x: torch.Tensor):
embedding_sum = torch.zeros(batch_size, length)
embedding_sum = embedding_sum.fill_(series_id).type(torch.LongTensor).to(self.device)
embedding_sum = self.id_embed(embedding_sum)
print("shape below")
print(embedding_sum.shape)
print(x.shape)
print(series_id)
position = torch.tensor(torch.arange(length), dtype=torch.long).to(self.device)
po_embedding = self.po_embed(position)
embedding_sum[:] = po_embedding
2 changes: 1 addition & 1 deletion tests/custom_encode.json
@@ -37,7 +37,7 @@

},
"lr": 0.3,
"epochs": 30,
"epochs": 2,
"batch_size":4

},
