Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

KeyError in Stacker #19

Closed
chrissny88 opened this issue Oct 4, 2022 · 29 comments
Closed

KeyError in Stacker #19

chrissny88 opened this issue Oct 4, 2022 · 29 comments

Comments

@chrissny88
Copy link

This is the code:

from verstack import Stacker

# Build stacked features: fit on train, then transform validation and test frames.
stacker = Stacker(objective = 'regression', auto = True)
X_train = stacker.fit_transform(X_train, y_train)
X_val = stacker.transform(X_val)
df1 = stacker.transform(df1)

# get lists of features created in each layer
layer_1_feats = stacker.stacked_features['layer_1']
layer_2_feats = stacker.stacked_features['layer_2']

# NOTE(review): LGBMRegressor is not imported in this snippet;
# it needs `from lightgbm import LGBMRegressor` to run.
model = LGBMRegressor(random_state=1)

# use only the second layer outputs as inputs into the final meta_model
model.fit(X_train[layer_2_feats], y_train)
pred = model.predict(df1[layer_2_feats])

And below is the error I am getting:

  • Initiating Stacker.fit_transform

    • Training/predicting with layer_1 models
      . Optimising model hyperparameters

KeyError Traceback (most recent call last)
File :13, in

File ~\anaconda3\lib\site-packages\verstack\tools.py:19, in timer..wrapped(*args, **kwargs)
16 @wraps(func)
17 def wrapped(*args, **kwargs):
18 start = time.time()
---> 19 result = func(*args, **kwargs)
20 end = time.time()
21 elapsed = round(end-start,5)

File ~\anaconda3\lib\site-packages\verstack\stacking\Stacker.py:615, in Stacker.fit_transform(self, X, y)
613 validate_fit_transform_args(X, y)
614 X_with_stacked_feats = X.reset_index(drop=True).copy()
--> 615 X_with_stacked_feats = self._apply_all_or_extra_layers_to_train(X_with_stacked_feats, y)
616 return X_with_stacked_feats

File ~\anaconda3\lib\site-packages\verstack\stacking\Stacker.py:574, in Stacker._apply_all_or_extra_layers_to_train(self, X, y)
572 if layers_added_after_fit_transform:
573 for layer in layers_added_after_fit_transform:
--> 574 X = self._apply_single_layer(layer, X, y)
575 else:
576 # if no extra layers apply all layers on train set
577 X = self._apply_all_layers(X, y)

File ~\anaconda3\lib\site-packages\verstack\stacking\Stacker.py:510, in Stacker._apply_single_layer(self, layer, X, y)
506 new_feats = self._create_new_feats_in_test(X, y, layer, applicable_feats)
507 # ---------------------------------------------------------------------
508 # create stacked feats in train set
509 else:
--> 510 new_feats = self._create_new_feats_in_train(X, y, layer, applicable_feats)
511 for feat in new_feats:
512 X = pd.concat([X, feat], axis = 1)

File ~\anaconda3\lib\site-packages\verstack\stacking\Stacker.py:478, in Stacker._create_new_feats_in_train(self, X, y, layer, applicable_feats)
476 for model in self.layers[layer]:
477 feat_name = self._create_feat_name(layer)
--> 478 new_feat = self._get_stack_feat(model, X[applicable_feats], y)
479 # append trained models from buffer to self.trained_models_list for layer/feature
480 self.trained_models[layer][feat_name] = self._trained_models_list_buffer

File ~\anaconda3\lib\site-packages\verstack\stacking\Stacker.py:316, in Stacker._get_stack_feat(self, model, X, y)
314 '''Apply stacking features creatin to either train or test set'''
315 if isinstance(y, pd.Series):
--> 316 new_feat = self._train_predict_by_model(model, X, y)
317 else:
318 new_feat = self._predict_by_model(model, X)

File ~\anaconda3\lib\site-packages\verstack\stacking\Stacker.py:300, in Stacker._train_predict_by_model(self, model, X, y)
298 for train_ix, test_ix in kfold.split(X,y):
299 X_train = X.loc[train_ix, :]
--> 300 y_train = y.loc[train_ix]
301 X_test = X.loc[test_ix, :]
302 # create independent model instance for each fold

File ~\anaconda3\lib\site-packages\pandas\core\indexing.py:967, in _LocationIndexer.getitem(self, key)
964 axis = self.axis or 0
966 maybe_callable = com.apply_if_callable(key, self.obj)
--> 967 return self._getitem_axis(maybe_callable, axis=axis)

File ~\anaconda3\lib\site-packages\pandas\core\indexing.py:1191, in _LocIndexer._getitem_axis(self, key, axis)
1188 if hasattr(key, "ndim") and key.ndim > 1:
1189 raise ValueError("Cannot index with multidimensional key")
-> 1191 return self._getitem_iterable(key, axis=axis)
1193 # nested tuple slicing
1194 if is_nested_tuple(key, labels):

File ~\anaconda3\lib\site-packages\pandas\core\indexing.py:1132, in _LocIndexer._getitem_iterable(self, key, axis)
1129 self._validate_key(key, axis)
1131 # A collection of keys
-> 1132 keyarr, indexer = self._get_listlike_indexer(key, axis)
1133 return self.obj._reindex_with_indexers(
1134 {axis: [keyarr, indexer]}, copy=True, allow_dups=True
1135 )

File ~\anaconda3\lib\site-packages\pandas\core\indexing.py:1327, in _LocIndexer._get_listlike_indexer(self, key, axis)
1324 ax = self.obj._get_axis(axis)
1325 axis_name = self.obj._get_axis_name(axis)
-> 1327 keyarr, indexer = ax._get_indexer_strict(key, axis_name)
1329 return keyarr, indexer

File ~\anaconda3\lib\site-packages\pandas\core\indexes\base.py:5782, in Index._get_indexer_strict(self, key, axis_name)
5779 else:
5780 keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr)
-> 5782 self._raise_if_missing(keyarr, indexer, axis_name)
5784 keyarr = self.take(indexer)
5785 if isinstance(key, Index):
5786 # GH 42790 - Preserve name from an Index

File ~\anaconda3\lib\site-packages\pandas\core\indexes\base.py:5845, in Index._raise_if_missing(self, key, indexer, axis_name)
5842 raise KeyError(f"None of [{key}] are in the [{axis_name}]")
5844 not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique())
-> 5845 raise KeyError(f"{not_found} not in index")

KeyError: '[3, 10, 16, 20, 21, 23, 25, 35, 38, 40, 41, 43, 47, 48, 60, 61, 65, 74, 77, 85, 86, 93, 98, 100, 110, 113, 121, 126, 128, 129, 133, 134, 135, 136, 142, 143, 149, 150, 151, 158, 159, 162, 165, 169, 172, 175, 177, 183, 188, 195, 203, 204, 219, 221, 223, 228, 229, 231, 236, 249, 264, 271, 274, 275, 283, 290, 296, 303, 304, 308, 311, 319, 323, 330, 331, 332, 338, 343, 354, 358, 359, 363, 366, 372, 379, 383, 385, 386, 388, 394, 399, 416, 419, 421, 424, 426, 436, 441, 442, 445, 449, 462, 468, 471, 474, 480, 489, 501, 507, 510, 530, 542, 547, 551, 555, 557, 559, 567, 572, 576, 582, 584, 585, 588, 609, 610, 611, 613, 615, 618, 619, 620, 622, 625, 627, 633, 641, 642, 660, 663, 668, 669, 670, 671, 674, 675, 690, 692, 693, 695, 703, 706, 710, 714, 716, 726, 730, 733, 739, 742, 749, 755, 756, 757, 761, 762, 763, 765, 768, 771, 772, 784, 793, 804, 806, 815, 821, 824, 831, 834, 835, 836, 848, 858, 861, 871, 874, 875, 878, 880, 884, 885, 892, 895, 896, 897, 904, 909, 912, 917, 921, 923, 925, 926, 928, 931, 932, 937, 938, 948, 950, 952, 962, 964, 974, 984, 987, 991, 996, 998, 1003, 1007, 1011, 1012, 1015, 1026, 1031, 1032, 1033, 1035, 1038, 1040, 1056, 1068, 1070, 1077, 1078, 1082, 1083, 1084, 1085, 1105, 1110, 1112, 1115, 1121, 1126, 1130, 1136, 1137, 1141, 1151, 1154, 1156, 1172, 1175, 1179, 1184, 1185, 1187, 1189, 1192, 1211, 1217, 1218, 1225, 1229, 1231, 1232, 1249, 1256, 1268, 1278, 1280, 1281, 1286, 1287, 1293, 1295, 1298, 1309, 1310, 1311, 1313, 1318, 1323, 1326, 1331, 1332, 1336, 1339, 1344, 1345, 1347, 1348, 1357, 1364, 1365, 1366, 1369, 1372, 1379, 1380, 1383, 1385, 1388, 1392, 1396, 1402, 1404, 1415, 1427, 1429, 1437, 1440, 1441, 1452, 1453, 1457, 1458, 1459, 1463, 1464, 1472, 1473, 1474, 1479, 1482, 1486, 1499, 1507, 1517, 1518, 1521, 1535, 1537, 1539, 1541, 1542, 1544, 1545, 1548, 1551, 1554, 1555, 1556, 1557, 1570, 1577, 1578, 1579, 1587, 1590, 1610, 1613, 1617, 1618, 1624, 1633, 1634, 1635, 1636, 1639, 1643, 1651, 1657, 1673, 1680, 1687, 1692, 1697, 1723, 
1732, 1735, 1736, 1737, 1747, 1750, 1753, 1756, 1757, 1762, 1765, 1772, 1774, 1783, 1788, 1794, 1800, 1815, 1824, 1836, 1843, 1844, 1847, 1849, 1850, 1863, 1870, 1877, 1886, 1888, 1891, 1894, 1897, 1899, 1904, 1910, 1914, 1918, 1923, 1925, 1926, 1935, 1938, 1940, 1949, 1951, 1956, 1959, 1961, 1966, 1972, 1973, 1979, 1987, 1994, 1997, 2002, 2014, 2017, 2018, 2023, 2028, 2032, 2039, 2044, 2045, 2048, 2050, 2072, 2077, 2079, 2081, 2089, 2092, 2101, 2105, 2107, 2108, 2113, 2115, 2116, 2118, 2119, 2125, 2135, 2141, 2142, 2143, 2144, 2147, 2149, 2151, 2160, 2163, 2166, 2167, 2174, 2175, 2180, 2181, 2184, 2195, 2198, 2200, 2207, 2215, 2218, 2221, 2222, 2224, 2236, 2239, 2243, 2245, 2248, 2254, 2256, 2260, 2264, 2268, 2270, 2274, 2275, 2283, 2288, 2289, 2291, 2292, 2297, 2305, 2309, 2317, 2323, 2324, 2327, 2329, 2330, 2333, 2335, 2336, 2338, 2349, 2360, 2361, 2363, 2370, 2373, 2379, 2384, 2388, 2393, 2399, 2403, 2405, 2408, 2411, 2413, 2416, 2419, 2421, 2429, 2435, 2437, 2439, 2443, 2444, 2446, 2449, 2461, 2470, 2473, 2478, 2484, 2485, 2492, 2503, 2504, 2505, 2506, 2507, 2515, 2518, 2531, 2538, 2544, 2545, 2548, 2557, 2560, 2564, 2568, 2569, 2573, 2578, 2583, 2587, 2596, 2623, 2626, 2630, 2633, 2647, 2652, 2659, 2662, 2664, 2670, 2678, 2679, 2682, 2683, 2686, 2689, 2690, 2701, 2702, 2704, 2705, 2708, 2723, 2726, 2732, 2734, 2746, 2750, 2752, 2758, 2761, 2767, 2769, 2770, 2777, 2779, 2781, 2785, 2788, 2790, 2793, 2794, 2800, 2803, 2806, 2807, 2810, 2814, 2830, 2832, 2840, 2850, 2859, 2861, 2862, 2867, 2879, 2882, 2887, 2899, 2901, 2904, 2906, 2911, 2920, 2922, 2924, 2927, 2928, 2929, 2934, 2936, 2941, 2944, 2972, 2977, 2979, 2984, 2985, 2986, 2990, 2991, 2995, 3005, 3008, 3013, 3022, 3028, 3031, 3037, 3038, 3039, 3044, 3046, 3048, 3051, 3056, 3059, 3062, 3063, 3064, 3070, 3074, 3076, 3078, 3079, 3083, 3091, 3094, 3096, 3098, 3112, 3120, 3124, 3126, 3133, 3134, 3135, 3145, 3146, 3159, 3160, 3165, 3172, 3183, 3184, 3186, 3188, 3189, 3190, 3203, 3205, 3211, 3214, 3229, 3236, 
3241, 3253, 3255, 3260, 3273, 3276, 3280, 3283, 3284, 3289, 3292, 3293, 3294, 3301, 3302, 3306, 3312, 3313, 3317, 3326, 3327, 3328, 3339, 3340, 3346, 3350, 3353, 3355, 3356, 3358, 3372, 3373, 3374, 3375, 3380, 3381, 3382, 3384, 3395, 3397, 3405, 3406, 3407, 3409, 3416, 3418, 3420, 3422, 3423, 3424, 3425, 3427, 3432, 3433, 3438, 3443, 3453, 3455, 3456, 3467, 3469, 3470, 3471, 3472, 3474, 3475, 3476, 3494, 3508, 3516, 3517, 3518, 3520, 3522, 3527, 3532, 3534, 3540, 3544, 3546, 3549, 3550, 3551, 3553, 3565, 3570, 3571, 3577, 3579, 3580, 3583, 3584, 3599, 3602, 3611, 3614, 3615, 3625, 3628, 3637, 3642, 3643, 3646, 3650, 3651, 3664, 3677, 3683, 3698, 3705, 3710, 3711, 3714, 3722, 3728, 3729, 3734 ] not in index'

@DanilZherebtsov
Copy link
Owner

KeyError Traceback (most recent call last)

The error reads that the data has different dimensions.

Can you please share the train/valid/test data snippets?

@chrissny88
Copy link
Author

X_train.shape,X_test.shape,df1.shape
((18199, 117), (1065, 117), (7517, 117))

and the same data was used on LGBMTuner and it worked well

@DanilZherebtsov
Copy link
Owner

what about
X_train.columns == X_val.columns
X_train.columns == df1.columns

all True?

@chrissny88
Copy link
Author

@chrissny88
Copy link
Author

array([ True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True])
all True

@DanilZherebtsov
Copy link
Owner

Thanks for the data. You didn't mention which is the target column in your data, so I assumed it was Unnamed: 0

I have used your data to replicate the problem you are referring to and didn't catch any errors. Try this code just to make sure that we are doing the same thing.

It starts with printing all the dependent libraries versions, so please check if yours are in line with the requirements

# First check if all the libs versions are in line
def print_verstack_dependencies_versions():
    """Print the installed version of every verstack dependency.

    Each dependency is imported independently; a missing package is
    reported as "not installed" instead of aborting the whole report
    with an ImportError, so the diagnostic always runs to completion.

    Returns:
        None. Output is written to stdout.
    """
    import importlib

    print('\nVERSTACK DEPENDENCIES\n')
    # (import name, display name) — display name matches the PyPI package
    # where it differs from the import name (scikit-learn, python-dateutil).
    dependencies = [
        ('numpy', 'numpy'),
        ('pandas', 'pandas'),
        ('xgboost', 'xgboost'),
        ('sklearn', 'scikit-learn'),
        ('lightgbm', 'lightgbm'),
        ('optuna', 'optuna'),
        ('plotly', 'plotly'),
        ('matplotlib', 'matplotlib'),
        ('dateutil', 'python-dateutil'),
        ('holidays', 'holidays'),
        ('mlxtend', 'mlxtend'),
        ('tensorflow', 'tensorflow'),
        ('keras', 'keras'),
        ('category_encoders', 'category_encoders'),
    ]
    for module_name, display_name in dependencies:
        try:
            module = importlib.import_module(module_name)
            # fixed typo from the original: 'plotly versoin' -> 'plotly version'
            print(f'{display_name} version: {module.__version__}')
        except ImportError:
            print(f'{display_name}: not installed')
    print('-'*50)

print_verstack_dependencies_versions()
# ------------------------------------------------------------
import pandas as pd
from verstack import Stacker  
from sklearn.tree import DecisionTreeRegressor

# load the train/test data from disk
train = pd.read_csv('train.1.csv')
test = pd.read_csv('X_test.csv')

# name of the target variable column
target = 'Unnamed: 0'

# split each frame into features and target
X_train, y_train = train.drop(target, axis = 1), train[target]
X_test, y_test = test.drop(target, axis = 1), test[target]

# configure the stacker for a regression objective with automatic layers
stacker = Stacker(objective = 'regression', auto = True)

# fit on the train set, then transform both sets to append stacked features
X_train = stacker.fit_transform(X_train, y_train)
X_test = stacker.transform(X_test)

# names of the stacked features produced by each layer
layer_1_feats = stacker.stacked_features['layer_1']
layer_2_feats = stacker.stacked_features['layer_2']

# train the final meta_model on the second-layer outputs only
model = DecisionTreeRegressor()
model.fit(X_train[layer_2_feats], y_train)
pred = model.predict(X_test[layer_2_feats])

These are the libs versions that are required:

VERSTACK DEPENDENCIES

numpy version: 1.19.5
pandas version: 1.3.0
xgboost version: 1.1.0
scikit-learn version: 1.0.1
lightgbm version: 3.3.0
optuna version: 2.10.0
plotly versoin: 5.3.1
matplotlib version: 3.3.4
python-dateutil version: 2.8.1
holidays version: 0.11.3.1
mlxtend version: 0.18.0
tensorflow version: 2.7.0
keras version: 2.7.0
category_encoders version: 2.4.0
--------------------------------------------------

@chrissny88
Copy link
Author

Thanks @DanilZherebtsov , let me try it and see what it gives.
Can I use any algorithm I want for the final model?

@chrissny88
Copy link
Author

I think all library requirements are met
numpy version: 1.21.5
pandas version: 1.4.2
xgboost version: 1.6.1
scikit-learn version: 1.0.1
lightgbm version: 3.3.0
optuna version: 2.10.0
plotly versoin: 5.3.1
matplotlib version: 3.5.1
python-dateutil version: 2.8.1
holidays version: 0.11.3.1
mlxtend version: 0.19.0
tensorflow version: 2.7.0
keras version: 2.7.0
category_encoders version: 2.4.0

@DanilZherebtsov
Copy link
Owner

Can I use any algorithm I want for the final model?

Yes, any model will work just fine

@chrissny88
Copy link
Author

The key error issue is resolved; now I am getting the following error:

  • Initiating Stacker.fit_transform

    • Training/predicting with layer_1 models
      . Optimising model hyperparameters
      .. fold 2 trained/predicted
      .. fold 4 trained/predicted
      . Optimising model hyperparameters
      .. fold 2 trained/predicted
      .. fold 4 trained/predicted
      . Optimising model hyperparameters
      .. fold 2 trained/predicted
      .. fold 4 trained/predicted
      . Optimising model hyperparameters
      .. Model not in optimisation list <verstack.stacking.kerasModel.kerasModel object at 0x00000179C25884F0>
      Epoch 1/200
      400/400 [==============================] - 6s 12ms/step - loss: 61.1809 - val_loss: 45.6878
      Epoch 2/200
      400/400 [==============================] - 1s 3ms/step - loss: 44.7508 - val_loss: 44.6705
      Epoch 3/200
      400/400 [==============================] - 1s 3ms/step - loss: 43.4057 - val_loss: 44.3273
      Epoch 4/200
      400/400 [==============================] - 1s 3ms/step - loss: 42.6066 - val_loss: 43.7882
      Epoch 5/200
      400/400 [==============================] - 1s 3ms/step - loss: 42.1308 - val_loss: 43.4833
      Epoch 6/200
      400/400 [==============================] - 1s 3ms/step - loss: 41.5509 - val_loss: 43.4861
      Epoch 7/200
      400/400 [==============================] - 1s 3ms/step - loss: 41.1257 - val_loss: 43.1080
      Epoch 8/200
      400/400 [==============================] - 1s 3ms/step - loss: 40.5419 - val_loss: 42.8096
      Epoch 9/200
      400/400 [==============================] - 1s 3ms/step - loss: 40.7069 - val_loss: 43.0724
      Epoch 10/200
      400/400 [==============================] - 1s 3ms/step - loss: 39.9577 - val_loss: 42.1842
      Epoch 11/200
      400/400 [==============================] - 1s 3ms/step - loss: 39.7359 - val_loss: 42.5659
      Epoch 12/200
      400/400 [==============================] - 1s 3ms/step - loss: 39.5250 - val_loss: 42.7879
      Epoch 13/200
      400/400 [==============================] - 1s 3ms/step - loss: 39.4272 - val_loss: 42.5011
      Epoch 14/200
      400/400 [==============================] - 1s 3ms/step - loss: 38.9736 - val_loss: 42.9530
      Epoch 15/200
      400/400 [==============================] - 1s 3ms/step - loss: 38.8194 - val_loss: 42.4270
      Epoch 16/200
      400/400 [==============================] - 1s 3ms/step - loss: 38.5331 - val_loss: 43.0290
      Epoch 17/200
      400/400 [==============================] - 1s 3ms/step - loss: 38.3927 - val_loss: 42.6174
      Epoch 18/200
      400/400 [==============================] - 1s 3ms/step - loss: 38.1196 - val_loss: 42.8906
      Epoch 19/200
      400/400 [==============================] - 1s 3ms/step - loss: 37.9949 - val_loss: 42.2633
      Epoch 20/200
      400/400 [==============================] - 1s 3ms/step - loss: 37.8260 - val_loss: 42.6951
      Epoch 21/200
      400/400 [==============================] - 1s 3ms/step - loss: 37.5979 - val_loss: 42.1408
      Epoch 22/200
      400/400 [==============================] - 1s 3ms/step - loss: 37.5510 - val_loss: 43.0909
      Epoch 23/200
      400/400 [==============================] - 1s 3ms/step - loss: 37.1438 - val_loss: 42.8110
      Epoch 24/200
      400/400 [==============================] - 1s 3ms/step - loss: 37.1210 - val_loss: 42.0858
      Epoch 25/200
      400/400 [==============================] - 1s 3ms/step - loss: 37.1234 - val_loss: 45.0286
      Epoch 26/200
      400/400 [==============================] - 1s 3ms/step - loss: 36.9060 - val_loss: 43.8422
      Epoch 27/200
      400/400 [==============================] - 1s 3ms/step - loss: 36.7060 - val_loss: 42.7008
      Epoch 28/200
      400/400 [==============================] - 1s 3ms/step - loss: 36.4021 - val_loss: 43.3145
      Epoch 29/200
      400/400 [==============================] - 1s 3ms/step - loss: 36.3461 - val_loss: 42.8035
      Epoch 30/200
      400/400 [==============================] - 1s 3ms/step - loss: 36.3608 - val_loss: 42.8933
      Epoch 31/200
      400/400 [==============================] - 1s 3ms/step - loss: 36.0465 - val_loss: 43.4430
      Epoch 32/200
      400/400 [==============================] - 1s 3ms/step - loss: 35.9692 - val_loss: 42.5859
      Epoch 33/200
      400/400 [==============================] - 1s 3ms/step - loss: 35.7871 - val_loss: 43.5365
      Epoch 34/200
      400/400 [==============================] - 2s 4ms/step - loss: 35.5787 - val_loss: 43.0042
      Epoch 35/200
      400/400 [==============================] - 2s 5ms/step - loss: 35.5441 - val_loss: 44.0689
      Epoch 36/200
      400/400 [==============================] - 1s 3ms/step - loss: 35.6130 - val_loss: 43.6545
      Epoch 37/200
      400/400 [==============================] - 1s 3ms/step - loss: 35.3230 - val_loss: 43.7473
      Epoch 38/200
      400/400 [==============================] - 1s 3ms/step - loss: 35.1811 - val_loss: 43.3125
      Epoch 39/200
      400/400 [==============================] - 1s 3ms/step - loss: 35.0730 - val_loss: 43.6306
      Epoch 40/200
      400/400 [==============================] - 1s 3ms/step - loss: 35.0399 - val_loss: 44.9567
      Epoch 41/200
      400/400 [==============================] - 1s 3ms/step - loss: 34.7688 - val_loss: 43.1072
      Epoch 42/200
      400/400 [==============================] - 1s 3ms/step - loss: 35.1258 - val_loss: 42.8059
      Epoch 43/200
      400/400 [==============================] - 1s 3ms/step - loss: 34.7072 - val_loss: 43.5322
      Epoch 44/200
      400/400 [==============================] - 1s 2ms/step - loss: 34.5696 - val_loss: 44.1012
      Epoch 45/200
      400/400 [==============================] - 1s 2ms/step - loss: 34.5891 - val_loss: 44.2506
      Epoch 46/200
      400/400 [==============================] - 1s 2ms/step - loss: 34.3206 - val_loss: 45.3664
      Epoch 47/200
      400/400 [==============================] - 1s 2ms/step - loss: 34.2876 - val_loss: 44.6273
      Epoch 48/200
      400/400 [==============================] - 1s 2ms/step - loss: 34.1713 - val_loss: 45.2599
      Epoch 49/200
      400/400 [==============================] - 1s 2ms/step - loss: 34.2650 - val_loss: 44.6314
      Epoch 50/200
      400/400 [==============================] - ETA: 0s - loss: 34.24 - 1s 2ms/step - loss: 34.2552 - val_loss: 45.2242
      Epoch 51/200
      400/400 [==============================] - 1s 2ms/step - loss: 33.8895 - val_loss: 43.9794
      Epoch 52/200
      400/400 [==============================] - 1s 2ms/step - loss: 33.6852 - val_loss: 44.8550
      Epoch 53/200
      400/400 [==============================] - 1s 2ms/step - loss: 33.5471 - val_loss: 45.3954
      Epoch 54/200
      400/400 [==============================] - 1s 2ms/step - loss: 33.7019 - val_loss: 43.7555
      Epoch 55/200
      400/400 [==============================] - 1s 3ms/step - loss: 33.6089 - val_loss: 44.8915
      Epoch 56/200
      400/400 [==============================] - 1s 3ms/step - loss: 33.5575 - val_loss: 44.3777
      Epoch 57/200
      400/400 [==============================] - 1s 3ms/step - loss: 33.2019 - val_loss: 44.1703
      Epoch 58/200
      400/400 [==============================] - 1s 3ms/step - loss: 33.3733 - val_loss: 44.9019
      Epoch 59/200
      400/400 [==============================] - 1s 3ms/step - loss: 33.2552 - val_loss: 45.3215
      Epoch 60/200
      400/400 [==============================] - 1s 2ms/step - loss: 33.0158 - val_loss: 43.9673
      Epoch 61/200
      400/400 [==============================] - 1s 2ms/step - loss: 33.0060 - val_loss: 44.8432
      Epoch 62/200
      400/400 [==============================] - 1s 2ms/step - loss: 33.0322 - val_loss: 44.3373
      Epoch 63/200
      400/400 [==============================] - 1s 2ms/step - loss: 32.9578 - val_loss: 44.2515
      Epoch 64/200
      400/400 [==============================] - 1s 2ms/step - loss: 32.6918 - val_loss: 45.1118
      Epoch 65/200
      400/400 [==============================] - 1s 2ms/step - loss: 32.7622 - val_loss: 44.0465
      Epoch 66/200
      400/400 [==============================] - 1s 2ms/step - loss: 32.6132 - val_loss: 45.0308
      Epoch 67/200
      400/400 [==============================] - 1s 2ms/step - loss: 32.7146 - val_loss: 46.7278
      Epoch 68/200
      400/400 [==============================] - 1s 3ms/step - loss: 32.4178 - val_loss: 43.8807
      Epoch 69/200
      400/400 [==============================] - 1s 2ms/step - loss: 32.3984 - val_loss: 44.1685
      Epoch 70/200
      400/400 [==============================] - 1s 2ms/step - loss: 32.6401 - val_loss: 43.8811
      Epoch 71/200
      400/400 [==============================] - 1s 2ms/step - loss: 32.5273 - val_loss: 43.1624
      Epoch 72/200
      400/400 [==============================] - 1s 3ms/step - loss: 32.1649 - val_loss: 43.8252
      Epoch 73/200
      400/400 [==============================] - 1s 3ms/step - loss: 32.1413 - val_loss: 43.7918
      Epoch 74/200
      400/400 [==============================] - 1s 3ms/step - loss: 32.0813 - val_loss: 44.2760
      Epoch 75/200
      400/400 [==============================] - 1s 3ms/step - loss: 32.3392 - val_loss: 43.6036
      Epoch 76/200
      400/400 [==============================] - 1s 3ms/step - loss: 31.9768 - val_loss: 43.2317
      Epoch 77/200
      400/400 [==============================] - 1s 2ms/step - loss: 32.0166 - val_loss: 44.5886
      Epoch 78/200
      400/400 [==============================] - 1s 2ms/step - loss: 31.5772 - val_loss: 43.7420
      Epoch 79/200
      400/400 [==============================] - 1s 2ms/step - loss: 31.7715 - val_loss: 44.8901
      Epoch 80/200
      400/400 [==============================] - 1s 2ms/step - loss: 31.4898 - val_loss: 45.0284
      Epoch 81/200
      400/400 [==============================] - 1s 2ms/step - loss: 31.8298 - val_loss: 45.2274
      Epoch 82/200
      400/400 [==============================] - 1s 2ms/step - loss: 31.4446 - val_loss: 44.8557
      Epoch 83/200
      400/400 [==============================] - 1s 2ms/step - loss: 31.2813 - val_loss: 45.0784
      Epoch 84/200
      400/400 [==============================] - 1s 2ms/step - loss: 31.2781 - val_loss: 45.0541
      Epoch 85/200
      400/400 [==============================] - 1s 2ms/step - loss: 31.3515 - val_loss: 44.7051
      Epoch 86/200
      400/400 [==============================] - 1s 2ms/step - loss: 31.3783 - val_loss: 45.4680
      Epoch 87/200
      400/400 [==============================] - 1s 2ms/step - loss: 31.2300 - val_loss: 44.4836
      Epoch 88/200
      400/400 [==============================] - 1s 3ms/step - loss: 31.3187 - val_loss: 45.4217
      Epoch 89/200
      400/400 [==============================] - 1s 3ms/step - loss: 30.9401 - val_loss: 45.5830
      Epoch 90/200
      400/400 [==============================] - 1s 3ms/step - loss: 31.0373 - val_loss: 44.7656
      Epoch 91/200
      400/400 [==============================] - 1s 3ms/step - loss: 30.9112 - val_loss: 45.2808
      Epoch 92/200
      400/400 [==============================] - 1s 3ms/step - loss: 31.0525 - val_loss: 45.4185
      Epoch 93/200
      400/400 [==============================] - 1s 4ms/step - loss: 30.9214 - val_loss: 45.0891
      Epoch 94/200
      400/400 [==============================] - 1s 2ms/step - loss: 31.0705 - val_loss: 45.5064
      Epoch 95/200
      400/400 [==============================] - 1s 2ms/step - loss: 30.9782 - val_loss: 44.7325
      Epoch 96/200
      400/400 [==============================] - 1s 2ms/step - loss: 30.5787 - val_loss: 45.4453
      Epoch 97/200
      400/400 [==============================] - 1s 2ms/step - loss: 30.9730 - val_loss: 44.5498
      Epoch 98/200
      400/400 [==============================] - 1s 2ms/step - loss: 30.5769 - val_loss: 44.9062
      Epoch 99/200
      400/400 [==============================] - 1s 2ms/step - loss: 30.5464 - val_loss: 45.4438
      Epoch 100/200
      400/400 [==============================] - 1s 2ms/step - loss: 30.4931 - val_loss: 45.2279
      Epoch 101/200
      400/400 [==============================] - 1s 2ms/step - loss: 30.5522 - val_loss: 46.0299
      Epoch 102/200
      400/400 [==============================] - 1s 2ms/step - loss: 30.3455 - val_loss: 45.7003
      Epoch 103/200
      400/400 [==============================] - 1s 3ms/step - loss: 30.9980 - val_loss: 45.8989
      Epoch 104/200
      400/400 [==============================] - 1s 3ms/step - loss: 30.5480 - val_loss: 45.5638
      Epoch 105/200
      400/400 [==============================] - 1s 3ms/step - loss: 30.3056 - val_loss: 45.3374
      Epoch 106/200
      400/400 [==============================] - 1s 3ms/step - loss: 30.3799 - val_loss: 45.4818
      Epoch 107/200
      400/400 [==============================] - 1s 3ms/step - loss: 30.1108 - val_loss: 45.7756
      Epoch 108/200
      400/400 [==============================] - 1s 2ms/step - loss: 30.1020 - val_loss: 45.9498
      Epoch 109/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.9593 - val_loss: 45.7203
      Epoch 110/200
      400/400 [==============================] - 1s 2ms/step - loss: 30.0418 - val_loss: 46.3655
      Epoch 111/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.9199 - val_loss: 46.4139
      Epoch 112/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.7945 - val_loss: 47.2336
      Epoch 113/200
      400/400 [==============================] - 1s 3ms/step - loss: 30.0151 - val_loss: 46.1614
      Epoch 114/200
      400/400 [==============================] - 1s 2ms/step - loss: 30.0223 - val_loss: 45.0649
      Epoch 115/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.8534 - val_loss: 45.7101
      Epoch 116/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.7386 - val_loss: 45.0888
      Epoch 117/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.7095 - val_loss: 45.6519
      Epoch 118/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.8349 - val_loss: 45.6554
      Epoch 119/200
      400/400 [==============================] - 1s 3ms/step - loss: 30.0295 - val_loss: 45.7702
      Epoch 120/200
      400/400 [==============================] - 1s 3ms/step - loss: 29.6623 - val_loss: 45.3132
      Epoch 121/200
      400/400 [==============================] - 1s 3ms/step - loss: 29.4741 - val_loss: 45.3080
      Epoch 122/200
      400/400 [==============================] - 1s 3ms/step - loss: 29.6534 - val_loss: 45.6453
      Epoch 123/200
      400/400 [==============================] - 1s 3ms/step - loss: 30.2955 - val_loss: 45.8680
      Epoch 124/200
      400/400 [==============================] - 1s 2ms/step - loss: 31.8604 - val_loss: 45.5589
      Epoch 125/200
      400/400 [==============================] - 1s 3ms/step - loss: 29.4722 - val_loss: 45.6427
      Epoch 126/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.2142 - val_loss: 46.5825
      Epoch 127/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.2406 - val_loss: 45.1562
      Epoch 128/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.3252 - val_loss: 45.2745
      Epoch 129/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.1292 - val_loss: 46.5555
      Epoch 130/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.3877 - val_loss: 45.4706
      Epoch 131/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.4331 - val_loss: 46.5556
      Epoch 132/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.1586 - val_loss: 46.8161
      Epoch 133/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.1908 - val_loss: 46.6487
      Epoch 134/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.3301 - val_loss: 47.3752
      Epoch 135/200
      400/400 [==============================] - 1s 3ms/step - loss: 29.0551 - val_loss: 47.2344
      Epoch 136/200
      400/400 [==============================] - 1s 3ms/step - loss: 29.0042 - val_loss: 46.7378
      Epoch 137/200
      400/400 [==============================] - 1s 3ms/step - loss: 29.0343 - val_loss: 46.6015
      Epoch 138/200
      400/400 [==============================] - 1s 3ms/step - loss: 28.9668 - val_loss: 46.4597
      Epoch 139/200
      400/400 [==============================] - 1s 3ms/step - loss: 28.8793 - val_loss: 46.6113
      Epoch 140/200
      400/400 [==============================] - 1s 2ms/step - loss: 29.0064 - val_loss: 46.0277
      Epoch 141/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.8927 - val_loss: 47.1952
      Epoch 142/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.9585 - val_loss: 45.7510
      Epoch 143/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.7321 - val_loss: 47.0833
      Epoch 144/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.6063 - val_loss: 46.6552
      Epoch 145/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.5378 - val_loss: 47.2903
      Epoch 146/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.5642 - val_loss: 47.8808
      Epoch 147/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.8107 - val_loss: 47.4514
      Epoch 148/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.6379 - val_loss: 46.9222
      Epoch 149/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.5444 - val_loss: 46.5014
      Epoch 150/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.7847 - val_loss: 47.6782
      Epoch 151/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.8089 - val_loss: 46.5458
      Epoch 152/200
      400/400 [==============================] - 1s 3ms/step - loss: 28.5354 - val_loss: 46.6576
      Epoch 153/200
      400/400 [==============================] - 1s 3ms/step - loss: 28.3490 - val_loss: 47.2714
      Epoch 154/200
      400/400 [==============================] - 1s 3ms/step - loss: 28.3510 - val_loss: 48.9538
      Epoch 155/200
      400/400 [==============================] - 1s 3ms/step - loss: 28.4716 - val_loss: 46.6303
      Epoch 156/200
      400/400 [==============================] - 1s 3ms/step - loss: 28.3347 - val_loss: 49.2721
      Epoch 157/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.3047 - val_loss: 47.7726
      Epoch 158/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.2383 - val_loss: 48.2155
      Epoch 159/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.5245 - val_loss: 48.7713
      Epoch 160/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.4351 - val_loss: 46.7390
      Epoch 161/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.4588 - val_loss: 49.3369
      Epoch 162/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.2478 - val_loss: 46.5372
      Epoch 163/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.3345 - val_loss: 47.8373
      Epoch 164/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.1176 - val_loss: 48.1571
      Epoch 165/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.2040 - val_loss: 49.2562
      Epoch 166/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.0484 - val_loss: 48.6618
      Epoch 167/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.8847 - val_loss: 49.4634
      Epoch 168/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.9490 - val_loss: 48.4052
      Epoch 169/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.9372 - val_loss: 48.8907
      Epoch 170/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.9248 - val_loss: 48.3905
      Epoch 171/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.8455 - val_loss: 47.6539
      Epoch 172/200
      400/400 [==============================] - 1s 2ms/step - loss: 27.9268 - val_loss: 49.0654
      Epoch 173/200
      400/400 [==============================] - 1s 2ms/step - loss: 27.8577 - val_loss: 47.5273
      Epoch 174/200
      400/400 [==============================] - 1s 2ms/step - loss: 28.0366 - val_loss: 48.4647
      Epoch 175/200
      400/400 [==============================] - 1s 2ms/step - loss: 27.9900 - val_loss: 49.2491
      Epoch 176/200
      400/400 [==============================] - 1s 2ms/step - loss: 27.6247 - val_loss: 47.2545
      Epoch 177/200
      400/400 [==============================] - 1s 2ms/step - loss: 27.5880 - val_loss: 49.2637
      Epoch 178/200
      400/400 [==============================] - 1s 2ms/step - loss: 27.6120 - val_loss: 49.4666
      Epoch 179/200
      400/400 [==============================] - 1s 2ms/step - loss: 27.8659 - val_loss: 50.3816
      Epoch 180/200
      400/400 [==============================] - 1s 3ms/step - loss: 28.0326 - val_loss: 48.0987
      Epoch 181/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.7441 - val_loss: 48.7371
      Epoch 182/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.6904 - val_loss: 48.1568
      Epoch 183/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.4572 - val_loss: 47.5962
      Epoch 184/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.7317 - val_loss: 47.1914
      Epoch 185/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.3061 - val_loss: 47.8795
      Epoch 186/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.5390 - val_loss: 47.6364
      Epoch 187/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.4495 - val_loss: 47.1032
      Epoch 188/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.4365 - val_loss: 48.9359
      Epoch 189/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.5645 - val_loss: 48.3638
      Epoch 190/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.3961 - val_loss: 47.1729
      Epoch 191/200
      400/400 [==============================] - 2s 4ms/step - loss: 27.4712 - val_loss: 47.4878
      Epoch 192/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.2986 - val_loss: 47.9412
      Epoch 193/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.4128 - val_loss: 48.3318
      Epoch 194/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.2365 - val_loss: 47.5549
      Epoch 195/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.3118 - val_loss: 48.2725
      Epoch 196/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.4922 - val_loss: 47.6526
      Epoch 197/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.1486 - val_loss: 48.1019
      Epoch 198/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.2993 - val_loss: 48.3971
      Epoch 199/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.4243 - val_loss: 47.2453
      Epoch 200/200
      400/400 [==============================] - 1s 3ms/step - loss: 27.4716 - val_loss: 46.7690

NotFoundError Traceback (most recent call last)
File :4, in

File ~\anaconda3\lib\site-packages\verstack\tools.py:19, in timer..wrapped(*args, **kwargs)
16 @wraps(func)
17 def wrapped(*args, **kwargs):
18 start = time.time()
---> 19 result = func(*args, **kwargs)
20 end = time.time()
21 elapsed = round(end-start,5)

File ~\anaconda3\lib\site-packages\verstack\stacking\Stacker.py:615, in Stacker.fit_transform(self, X, y)
613 validate_fit_transform_args(X, y)
614 X_with_stacked_feats = X.reset_index(drop=True).copy()
--> 615 X_with_stacked_feats = self._apply_all_or_extra_layers_to_train(X_with_stacked_feats, y)
616 return X_with_stacked_feats

File ~\anaconda3\lib\site-packages\verstack\stacking\Stacker.py:574, in Stacker._apply_all_or_extra_layers_to_train(self, X, y)
572 if layers_added_after_fit_transform:
573 for layer in layers_added_after_fit_transform:
--> 574 X = self._apply_single_layer(layer, X, y)
575 else:
576 # if no extra layers apply all layers on train set
577 X = self._apply_all_layers(X, y)

File ~\anaconda3\lib\site-packages\verstack\stacking\Stacker.py:510, in Stacker._apply_single_layer(self, layer, X, y)
506 new_feats = self._create_new_feats_in_test(X, y, layer, applicable_feats)
507 # ---------------------------------------------------------------------
508 # create stacked feats in train set
509 else:
--> 510 new_feats = self._create_new_feats_in_train(X, y, layer, applicable_feats)
511 for feat in new_feats:
512 X = pd.concat([X, feat], axis = 1)

File ~\anaconda3\lib\site-packages\verstack\stacking\Stacker.py:478, in Stacker._create_new_feats_in_train(self, X, y, layer, applicable_feats)
476 for model in self.layers[layer]:
477 feat_name = self._create_feat_name(layer)
--> 478 new_feat = self._get_stack_feat(model, X[applicable_feats], y)
479 # append trained models from buffer to self.trained_models_list for layer/feature
480 self.trained_models[layer][feat_name] = self._trained_models_list_buffer

File ~\anaconda3\lib\site-packages\verstack\stacking\Stacker.py:316, in Stacker._get_stack_feat(self, model, X, y)
314 '''Apply stacking features creatin to either train or test set'''
315 if isinstance(y, pd.Series):
--> 316 new_feat = self._train_predict_by_model(model, X, y)
317 else:
318 new_feat = self._predict_by_model(model, X)

File ~\anaconda3\lib\site-packages\verstack\stacking\Stacker.py:303, in Stacker._train_predict_by_model(self, model, X, y)
301 X_test = X.loc[test_ix, :]
302 # create independent model instance for each fold
--> 303 fold_model = copy.deepcopy(model)
304 pred, model = self._train_predict(fold_model, X_train, y_train, X_test)
305 pred_series.loc[test_ix] = pred.flatten()

File ~\anaconda3\lib\copy.py:172, in deepcopy(x, memo, _nil)
170 y = x
171 else:
--> 172 y = _reconstruct(x, memo, *rv)
174 # If is its own copy, don't memoize.
175 if y is not x:

File ~\anaconda3\lib\copy.py:270, in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)
268 if state is not None:
269 if deep:
--> 270 state = deepcopy(state, memo)
271 if hasattr(y, 'setstate'):
272 y.setstate(state)

File ~\anaconda3\lib\copy.py:146, in deepcopy(x, memo, _nil)
144 copier = _deepcopy_dispatch.get(cls)
145 if copier is not None:
--> 146 y = copier(x, memo)
147 else:
148 if issubclass(cls, type):

File ~\anaconda3\lib\copy.py:230, in _deepcopy_dict(x, memo, deepcopy)
228 memo[id(x)] = y
229 for key, value in x.items():
--> 230 y[deepcopy(key, memo)] = deepcopy(value, memo)
231 return y

File ~\anaconda3\lib\copy.py:153, in deepcopy(x, memo, _nil)
151 copier = getattr(x, "deepcopy", None)
152 if copier is not None:
--> 153 y = copier(memo)
154 else:
155 reductor = dispatch_table.get(cls)

File ~\anaconda3\lib\site-packages\keras\engine\training.py:329, in Model.deepcopy(self, memo)
326 def deepcopy(self, memo):
327 if self.built:
328 new = pickle_utils.deserialize_model_from_bytecode(
--> 329 *pickle_utils.serialize_model_as_bytecode(self))
330 memo[id(self)] = new
331 else:
332 # See comment in reduce for explanation

File ~\anaconda3\lib\site-packages\keras\saving\pickle_utils.py:77, in serialize_model_as_bytecode(model)
75 with tf.io.gfile.GFile(dest_path, "rb") as f:
76 info = tarfile.TarInfo(name=os.path.relpath(dest_path, temp_dir))
---> 77 info.size = f.size()
78 archive.addfile(tarinfo=info, fileobj=f)
79 tf.io.gfile.rmtree(temp_dir)

File ~\anaconda3\lib\site-packages\tensorflow\python\lib\io\file_io.py:99, in FileIO.size(self)
97 def size(self):
98 """Returns the size of the file."""
---> 99 return stat(self.__name).length

File ~\anaconda3\lib\site-packages\tensorflow\python\lib\io\file_io.py:910, in stat(filename)
897 @tf_export(v1=["gfile.Stat"])
898 def stat(filename):
899 """Returns file statistics for a given path.
900
901 Args:
(...)
908 errors.OpError: If the operation fails.
909 """
--> 910 return stat_v2(filename)

File ~\anaconda3\lib\site-packages\tensorflow\python\lib\io\file_io.py:926, in stat_v2(path)
913 @tf_export("io.gfile.stat")
914 def stat_v2(path):
915 """Returns file statistics for a given path.
916
917 Args:
(...)
924 errors.OpError: If the operation fails.
925 """
--> 926 return _pywrap_file_io.Stat(compat.path_to_str(path))

NotFoundError:

@chrissny88
Copy link
Author

this is the code:

from verstack import Stacker

stacker = Stacker(objective = 'regression', auto = True)
X= stacker.fit_transform(X, y)
test = stacker.transform(test)

layer_1_feats = stacker.stacked_features['layer_1']
layer_2_feats = stacker.stacked_features['layer_2']

model = DecisionTreeRegressor()

model.fit(X[layer_2_feats], y)
pred = model.predict(test[layer_2_feats])

@DanilZherebtsov
Copy link
Owner

I noticed that you numpy and pandas version are not in line with the requirements. Can you reinstall the following:
pip install numpy==1.19.5
pip install pandas==1.3.0

@DanilZherebtsov
Copy link
Owner

And which python version are you using?

@chrissny88
Copy link
Author

3.9.12 (main, Apr 4 2022, 05:22:27) [MSC v.1916 64 bit (AMD64)]

@DanilZherebtsov
Copy link
Owner

I've made a clean virtual environment, installed only python 3.9.12 and verstack and still didn't get the error. Can you try the same. I presume you might have some conflict with your local environment.

$ cd folder_with_your_data
$ virtualenv venv --python=python3
$ source venv/bin/activate
$ pip install verstack
$ python your_code_that_loads_the_data_and_runs_stacker.py

@DanilZherebtsov
Copy link
Owner

DanilZherebtsov commented Oct 6, 2022

And by looking at your loss during training stacker I can see that we are using different data.

From the data you provided:

  1. Which .csv is used for X and y?
  2. What is the column name for 'y' in that data?
  3. Which .csv is used for your 'test' object?
  4. Did you remove the target (y) column from your test set?

@chrissny88
Copy link
Author

I've made a clean virtual environment, installed only python 3.9.12 and verstack and still didn't get the error. Can you try the same. I presume you might have some conflict with your local environment.

$ cd folder_with_your_data $ virtualenv venv --python=python3 $ source venv/bin/activate $ pip install verstack $ python your_code_that_loads_the_data_and_runs_stacker.py

you were right , I tried with google colab and everything seems to work fine

@chrissny88
Copy link
Author

you said that i should use the second layer outputs as inputs in to the final meta_model, why not layer 1 or both layers?

@DanilZherebtsov
Copy link
Owner

you were right , I tried with google colab and everything seems to work fine

Great. It's good practice to launch new projects in isolated environments; that way you'll be in full control of all the required dependencies.

@DanilZherebtsov
Copy link
Owner

you said that i should use the second layer outputs as inputs in to the final meta_model, why not layer 1 or both layers?

Not exactly. You can use the features from layer_1 or from layer_2, or from both layers, or combine them with metafeats that stacker generates, or even throw in the original X feats. It is a matter of experimentation and is subject to the final meta model that you are using.
A rule of thumb is: if you use the latter layers as feats, the meta model should be simpler. But it is all individual and highly dependent on your data.

@chrissny88
Copy link
Author

Noted..thanks

Can I write the code as below if I want to try 2 different algorithms for the final model, just to avoid training each separately and hence save time?
``
stacker = Stacker(objective = 'binary', auto = True)
train_X = stacker.fit_transform(train_X,train_y)
val_X = stacker.transform(val_X)
test = stacker.transform(test)

get lists of features created in each layer

layer_1_feats = stacker.stacked_features['layer_1']
layer_2_feats = stacker.stacked_features['layer_2']

model = CatBoostClassifier(random_state=1)
model1 = LGBMClassifier(random_state=1)

use only the second layer outputs as inputs in to the final meta_model

#model.fit(train_X[layer_2_feats], train_y)
#pred = model.predict(df1[layer_2_feats])``

@DanilZherebtsov
Copy link
Owner

DanilZherebtsov commented Oct 10, 2022

The code you wrote seems perfectly fine. Only I don't see how you make use of the two models you have in mind.

model = CatBoostClassifier(random_state=1)
model1 = LGBMClassifier(random_state=1)

You have initialised model and model1 and then what?

If you want to compare the performance of different final (meta) models, then you can:

def score_model(model, X_train, y_train, X_val, y_val, metric_func):
    model.fit(X_train, y_train)
    pred = model.predict(X_val)
    score = metric_func(y_val, pred)
    print(f'Score: {score}')

from sklearn.metrics import mean_squared_error as mse
# score your CatBoostClassifier
score_model(model, X_train[layer_2_feats], y_train, X_val[layer_2_feats], y_val, mse)

# score your LGBMClassifier
score_model(model1, X_train[layer_2_feats], y_train, X_val[layer_2_feats], y_val, mse)

Let me know if that was what you were looking for.

@chrissny88
Copy link
Author

Thanks, that's exactly what I wanted and it worked perfectly

@ishanchokshi
Copy link

ishanchokshi commented Dec 13, 2022

@chrissny88 How did you resolve the key error as I am facing the same issue?

@DanilZherebtsov
Copy link
Owner

@chrissny88 How did you resolve the key error as I am facing the same issue?

Can you provide the code you are using to get the error and a error stack trace?
Also please print the libraries versions you are using:

import verstack
import lightgbm 
import optuna
import pandas
import numpy
import sklearn
print(f'verstack:    {verstack.__version__}')
print(f'lightgbm:    {lightgbm.__version__}')
print(f'sklearn:     {sklearn.__version__}')
print(f'optuna:      {optuna.__version__}')
print(f'pandas:      {pandas.__version__}')
print(f'numpy:       {numpy.__version__}')

@chrissny88
Copy link
Author

Make sure the indexes are the same in X_train and y_train:

X_train.reset_index(drop=True, inplace=True)
y_train.reset_index(drop=True, inplace=True)

@ishanchokshi
Copy link

Thanks, this worked!

@chrissny88
Copy link
Author

Thanks, this worked!

You are welcome.
Are you using google colab or Jupyter notebook?

@ishanchokshi
Copy link

I am using google colab

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

3 participants