Merge pull request #380 from ClimbsRocks/dev5
Dev5
ClimbsRocks committed Feb 8, 2018
2 parents 8887a36 + 92a8a4f commit a88620d
Showing 3 changed files with 29 additions and 37 deletions.
5 changes: 0 additions & 5 deletions auto_ml/__init__.py
@@ -1,8 +1,3 @@
-"""
-For examples, please reference https://github.com/ClimbsRocks/auto_ml
-"""
-
 from auto_ml.predictor import Predictor
 from auto_ml._version import __version__
 from auto_ml.utils_models import load_ml_model
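
The three imports kept above are the package's documented public surface. For context, a minimal round trip through that API, following the pattern in the project README, might look like the sketch below; the DataFrame, column names, and toy-sized data are illustrative, not taken from this commit.

import pandas as pd

from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model

# Illustrative toy data: one output column, one categorical column
df_train = pd.DataFrame({
    'sqft': [850, 1200, 1550, 2100, 900, 1300, 1700, 2400],
    'neighborhood': ['a', 'b', 'a', 'c', 'b', 'a', 'c', 'c'],
    'price': [95, 140, 180, 260, 105, 155, 200, 290]})

column_descriptions = {
    'price': 'output',
    'neighborhood': 'categorical'}

ml_predictor = Predictor(type_of_estimator='regressor',
                         column_descriptions=column_descriptions)
ml_predictor.train(df_train)

# save() writes the trained pipeline to disk and returns its file name;
# load_ml_model, re-exported in __init__.py above, is the inverse
file_name = ml_predictor.save()
trained_model = load_ml_model(file_name)
predictions = trained_model.predict(df_train)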
28 changes: 15 additions & 13 deletions auto_ml/predictor.py
@@ -1429,18 +1429,20 @@ def train_one_categorical_model(category, relevant_X, relevant_y):
             }
             return result
 
-        pool = pathos.multiprocessing.ProcessPool()
-
-        # Since we may have already closed the pool, try to restart it
-        try:
-            pool.restart()
-        except AssertionError as e:
-            pass
 
         if os.environ.get('is_test_suite', False) == 'True':
             # If this is the test_suite, do not run things in parallel
             results = list(map(lambda x: train_one_categorical_model(x[0], x[1], x[2]), categories_and_data))
         else:
+
+            pool = pathos.multiprocessing.ProcessPool()
+
+            # Since we may have already closed the pool, try to restart it
+            try:
+                pool.restart()
+            except AssertionError as e:
+                pass
+
             try:
                 results = list(pool.map(lambda x: train_one_categorical_model(x[0], x[1], x[2]), categories_and_data))
             except RuntimeError:
@@ -1450,12 +1452,12 @@ def train_one_categorical_model(category, relevant_X, relevant_y):
                 results = list(pool.map(lambda x: train_one_categorical_model(x[0], x[1], x[2]), categories_and_data))
                 sys.setrecursionlimit(original_recursion_limit)
 
-        # Once we have gotten all we need from the pool, close it so it's not taking up unnecessary memory
-        pool.close()
-        try:
-            pool.join()
-        except AssertionError:
-            pass
+            # Once we have gotten all we need from the pool, close it so it's not taking up unnecessary memory
+            pool.close()
+            try:
+                pool.join()
+            except AssertionError:
+                pass
 
         for result in results:
             if result['trained_category_model'] is not None:
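
Taken together, the two predictor.py hunks move the entire pool lifecycle (create, restart, map, close, join) inside the non-test branch, so the test suite never spawns worker processes. A self-contained sketch of the resulting pattern follows; the task data, the train_one stand-in, and the raised recursion-limit value are illustrative, while the pathos calls (ProcessPool, restart, map, close, join) are the same ones used in the diff.

import os
import sys

import pathos


def train_one(task):
    # Stand-in for train_one_categorical_model: unpack one category's data
    category, relevant_X, relevant_y = task
    return {'category': category, 'n_rows': len(relevant_X)}


def train_all(categories_and_data):
    if os.environ.get('is_test_suite', False) == 'True':
        # In the test suite, run serially: no worker processes are spawned
        return list(map(train_one, categories_and_data))

    pool = pathos.multiprocessing.ProcessPool()

    # The pool may have been closed by an earlier round; restart it,
    # ignoring the AssertionError pathos can raise here
    try:
        pool.restart()
    except AssertionError:
        pass

    try:
        results = list(pool.map(train_one, categories_and_data))
    except RuntimeError:
        # dill can exhaust the recursion limit while serializing deep
        # objects; raise the limit, retry, then restore it (the exact
        # limit auto_ml uses is in the collapsed lines of this diff)
        original_recursion_limit = sys.getrecursionlimit()
        sys.setrecursionlimit(25000)
        results = list(pool.map(train_one, categories_and_data))
        sys.setrecursionlimit(original_recursion_limit)

    # Close the pool once done so it is not holding memory
    pool.close()
    try:
        pool.join()
    except AssertionError:
        pass

    return results


if __name__ == '__main__':
    tasks = [('a', [[1], [2]], [0, 1]), ('b', [[3]], [1])]
    print(train_all(tasks))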
33 changes: 14 additions & 19 deletions auto_ml/utils_ensembling.py
@@ -42,34 +42,29 @@ def get_predictions_for_one_estimator(estimator, X):
             predictions_from_all_estimators = map(lambda predictor: get_predictions_for_one_estimator(predictor, X), self.ensemble_predictors)
 
         else:
-            # Open a new multiprocessing pool
-            pool = pathos.multiprocessing.ProcessPool()
-
-            # Since we may have already closed the pool, try to restart it
-            try:
-                pool.restart()
-            except AssertionError as e:
-                pass
 
-            # Pathos doesn't like datasets beyond a certain size. So fall back on single, non-parallel predictions instead.
-            # try:
             if os.environ.get('is_test_suite', False) == 'True':
                 predictions_from_all_estimators = map(lambda predictor: get_predictions_for_one_estimator(predictor, X), self.ensemble_predictors)
 
             else:
+                # Open a new multiprocessing pool
+                pool = pathos.multiprocessing.ProcessPool()
+
+                # Since we may have already closed the pool, try to restart it
+                try:
+                    pool.restart()
+                except AssertionError as e:
+                    pass
                 predictions_from_all_estimators = pool.map(lambda predictor: get_predictions_for_one_estimator(predictor, X), self.ensemble_predictors)
 
-            # except:
-            #     predictions_from_all_estimators = map(lambda predictor: get_predictions_for_one_estimator(predictor, X), self.ensemble_predictors)
-            #     predictions_from_all_estimators = list(predictions_from_all_estimators)
-
-
-            # Once we have gotten all we need from the pool, close it so it's not taking up unnecessary memory
-            pool.close()
-            try:
-                pool.join()
-            except AssertionError:
-                pass
+                # Once we have gotten all we need from the pool, close it so it's not taking up unnecessary memory
+                pool.close()
+                try:
+                    pool.join()
+                except AssertionError:
+                    pass
 
             predictions_from_all_estimators = list(predictions_from_all_estimators)
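
utils_ensembling.py applies the same gating to prediction fan-out across ensemble members. Below is a self-contained sketch of the shape this hunk converges on; the EnsembleSketch and MeanEstimator classes are illustrative stand-ins for the real ensembler, not code from this commit.

import os

import pathos


class EnsembleSketch(object):
    # Stand-in for the ensembler in utils_ensembling.py; only the
    # prediction fan-out is sketched here
    def __init__(self, ensemble_predictors):
        self.ensemble_predictors = ensemble_predictors

    def predict_all(self, X):

        def get_predictions_for_one_estimator(estimator, X):
            return estimator.predict(X)

        if os.environ.get('is_test_suite', False) == 'True':
            predictions_from_all_estimators = map(
                lambda predictor: get_predictions_for_one_estimator(predictor, X),
                self.ensemble_predictors)

        else:
            pool = pathos.multiprocessing.ProcessPool()
            try:
                pool.restart()
            except AssertionError:
                pass

            predictions_from_all_estimators = pool.map(
                lambda predictor: get_predictions_for_one_estimator(predictor, X),
                self.ensemble_predictors)

            pool.close()
            try:
                pool.join()
            except AssertionError:
                pass

        # map() is lazy in Python 3, so materialize before returning,
        # mirroring the final context line of the hunk
        return list(predictions_from_all_estimators)


class MeanEstimator(object):
    # Illustrative estimator: predicts the mean of each row
    def predict(self, X):
        return [sum(row) / float(len(row)) for row in X]


if __name__ == '__main__':
    ensemble = EnsembleSketch([MeanEstimator(), MeanEstimator()])
    print(ensemble.predict_all([[1, 2], [3, 4]]))

Note that pathos serializes work with dill rather than pickle, which is why mapping a lambda over worker processes works here at all; the standard library's multiprocessing.Pool would refuse to pickle it.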
