Skip to content
Permalink
Browse files

predict and get_device

  • Loading branch information...
jhfjhfj1 committed Mar 14, 2019
1 parent 14ca990 commit 3906aec886f5a4e40ca7a5e2a2b5293830b79ecb
@@ -31,3 +31,11 @@ def regression_loss(cls, prediction, target):
@classmethod
def binary_classification_loss(cls, prediction, target):
    """Compute the binary classification loss via the configured backend.

    Args:
        prediction: Model outputs, in whatever tensor/array format
            ``cls.backend`` expects.
        target: Ground-truth labels in the same backend format.

    Returns:
        The loss value produced by ``cls.backend.binary_classification_loss``.
    """
    return cls.backend.binary_classification_loss(prediction, target)

@classmethod
def predict(cls, model, loader):
    """Delegate batch prediction to the configured backend.

    Args:
        model: A trained model in the backend's native format.
        loader: An iterable of input batches (backend-specific loader).

    Returns:
        Whatever ``cls.backend.predict`` returns — presumably the stacked
        model outputs for all batches (confirm against the backend impl).
    """
    return cls.backend.predict(model, loader)

@classmethod
def get_device(cls):
    """Return the device name chosen by the configured backend.

    Returns:
        The backend's device identifier (the torch backend returns the
        string 'cuda' or 'cpu').
    """
    return cls.backend.get_device()
@@ -1,4 +1,17 @@
import numpy as np
from functools import reduce
from autokeras.backend.torch.model import produce_model
from autokeras.backend.torch.data_transformer import ImageDataTransformer
from autokeras.backend.torch.model_trainer import ModelTrainer
from autokeras.backend.torch.model_trainer import ModelTrainer, get_device
from autokeras.backend.torch.loss_function import *


def predict(torch_model, loader):
    """Run ``torch_model`` over every batch in ``loader`` and stack the outputs.

    Args:
        torch_model: A callable PyTorch model (should already be in eval mode).
        loader: An iterable yielding input batches (e.g. a DataLoader).

    Returns:
        A single numpy array: the model outputs of all batches concatenated
        along the first axis.
    """
    outputs = []
    with torch.no_grad():
        for inputs in loader:
            # .cpu() so this also works when the model/tensors live on CUDA
            # (.numpy() raises on a CUDA tensor); it is a no-op on CPU.
            outputs.append(torch_model(inputs).cpu().numpy())
    # np.concatenate accepts the whole list at once — no functools.reduce
    # over pairwise concatenations needed.
    return np.concatenate(outputs)


@@ -32,6 +32,13 @@
from autokeras.text.pretrained_bert.optimization import BertAdam, warmup_linear


def get_device():
    """Select the torch device name.

    Returns:
        str: 'cuda' when a CUDA device is available, otherwise 'cpu'.
    """
    if torch.cuda.is_available():
        return 'cuda'
    return 'cpu'


class ModelTrainer(ModelTrainerBase):
"""A class that is used to train the model.
This class can train a Pytorch model with the given data loaders.
@@ -47,6 +54,8 @@ class ModelTrainer(ModelTrainerBase):

def __init__(self, model, path, **kwargs):
super().__init__(**kwargs)
if self.device is None:
self.device = get_device()
self.model = model
if torch.cuda.device_count() > 1:
self.model = torch.nn.DataParallel(self.model)
@@ -12,7 +12,6 @@
from autokeras.constant import Constant
from autokeras.nn.model_trainer import ModelTrainerBase
from autokeras.unsupervised import Unsupervised
from autokeras.utils import get_device


class DCGAN(Unsupervised):
@@ -37,7 +36,7 @@ def __init__(self, nz=100, ngf=32, ndf=32, nc=3, verbose=False, gen_training_res
self.ndf = ndf
self.nc = nc
self.verbose = verbose
self.device = get_device()
self.device = Backend.get_device()
self.gen_training_result = gen_training_result
self.augment = augment if augment is not None else Constant.DATA_AUGMENTATION
self.data_transformer = None
@@ -63,7 +62,7 @@ def fit(self, x_train):
Backend.binary_classification_loss,
self.verbose,
self.gen_training_result,
device=get_device()).train_model()
device=Backend.get_device()).train_model()

def generate(self, input_sample=None):
if input_sample is None:
@@ -1,17 +1,12 @@
from functools import reduce

import torch
import numpy as np

import os
import time

from autokeras.backend import Backend
from autokeras.constant import Constant
from autokeras.search import BayesianSearcher, train

from autokeras.utils import pickle_to_file, rand_temp_folder_generator, ensure_dir
from autokeras.nn.generator import CnnGenerator, MlpGenerator, ResNetGenerator, DenseNetGenerator
from autokeras.utils import get_device


class NetworkModule:
@@ -117,30 +112,7 @@ def predict(self, test_loader):
model = self.best_model.produce_model()
model.eval()

outputs = []
with torch.no_grad():
for index, inputs in enumerate(test_loader):
outputs.append(model(inputs).numpy())
output = reduce(lambda x, y: np.concatenate((x, y)), outputs)
return output

def evaluate(self, test_data):
    """Evaluate the performance of the best architecture in terms of the loss.

    Args:
        test_data: A DataLoader instance representing the testing data.

    Returns:
        The value computed by ``self.metric().compute`` over the collected
        predictions and targets.
    """
    model = self.best_model.produce_model()
    model.eval()
    device = get_device()
    # Bug fix: the inputs were moved to `device` but the model was not,
    # which crashes with a device-mismatch error when CUDA is available.
    model = model.to(device)
    target, prediction = [], []

    with torch.no_grad():
        for x, y in test_data:
            x, y = x.to(device), y.to(device)
            prediction.append(model(x))
            target.append(y)
    return self.metric().compute(prediction, target)
return Backend.predict(model, test_loader)


class CnnModule(NetworkModule):
@@ -1,7 +1,5 @@
import abc

from autokeras.utils import get_device


class ModelTrainerBase(abc.ABC):
""" A base class all model trainers will inherit from.
@@ -27,10 +25,7 @@ def __init__(self,
metric=None,
verbose=False,
device=None):
if device:
self.device = device
else:
self.device = get_device()
self.device = device
self.metric = metric
self.verbose = verbose
self.loss_function = loss_function
@@ -51,5 +46,3 @@ def train_model(self,
if the model still makes no improvement, finish training.
"""
pass


@@ -1,7 +1,9 @@
import os

from abc import ABC, abstractmethod
from autokeras.utils import temp_path_generator, ensure_dir, download_file_from_google_drive, get_device

from autokeras.backend import Backend
from autokeras.utils import temp_path_generator, ensure_dir, download_file_from_google_drive


class Pretrained(ABC):
@@ -11,7 +13,7 @@ def __init__(self, verbose=True, model_path=None):
"""Initialize the instance."""
self.verbose = verbose
self.model = None
self.device = get_device()
self.device = Backend.get_device()
self.model_path = model_path if model_path is not None else temp_path_generator()
ensure_dir(self.model_path)
self.local_paths = [os.path.join(self.model_path, x.local_name) for x in self._google_drive_files]
@@ -13,7 +13,6 @@

from autokeras.constant import Constant
from autokeras.pretrained.base import Pretrained
from autokeras.utils import get_device, download_file_from_google_drive, temp_path_generator, ensure_dir


def weights_init(m):
@@ -18,9 +18,9 @@
from torch.nn import functional
from torch.nn import init as init

from autokeras.backend import Backend
from autokeras.constant import Constant
from autokeras.pretrained.base import Pretrained
from autokeras.utils import get_device, temp_path_generator, ensure_dir, download_file_from_google_drive

"""VOC Dataset Classes
@@ -488,7 +488,7 @@ class ObjectDetector(Pretrained):
def __init__(self):
super(ObjectDetector, self).__init__()
self.model = None
self.device = get_device()
self.device = Backend.get_device()
# load net
num_classes = len(VOC_CLASSES) + 1 # +1 for background
self.model = self._build_ssd('test', 300, num_classes) # initialize SSD
@@ -9,7 +9,6 @@
from autokeras.constant import Constant
from autokeras.pretrained.base import Pretrained
from autokeras.pretrained.voice_generator.deepvoice3_pytorch import frontend, builder
from autokeras.utils import temp_path_generator, ensure_dir, get_device, download_file_from_google_drive


# NOTE: If you want full control for model architecture. please take a look
@@ -1,10 +1,8 @@
import os
from abc import ABC, abstractmethod
from sklearn.model_selection import train_test_split
import torch
import numpy as np
from functools import reduce

from autokeras.backend import Backend
from autokeras.constant import Constant
from autokeras.net_module import CnnModule
from autokeras.search import BayesianSearcher, train
@@ -97,6 +95,7 @@ def final_fit(self, x_train, y_train, x_test, y_test, trainer_args=None, retrain
"""
pass


class DeepTaskSupervised(SearchSupervised):
"""
Inherits from SearchSupervised class.
@@ -170,8 +169,8 @@ def fit(self, x, y, time_limit=None):
validation_set_size = min(validation_set_size, 500)
validation_set_size = max(validation_set_size, 1)
x_train, x_valid, y_train, y_valid = train_test_split(x, y,
test_size=validation_set_size,
random_state=42)
test_size=validation_set_size,
random_state=42)
# DEVELOPERS - WHY DOES THIS TRANSFORMER OCCUR AFTER SPLITTING THE DATA?
self.init_transformer(x)
# Transform x_train
@@ -227,9 +226,11 @@ def loss(self):
@abstractmethod
def get_n_output_node(self):
    """Return the number of output nodes for this task.

    NOTE(review): presumably the width of the network's final layer
    (e.g. the number of classes) — confirm against concrete subclasses.
    """
    pass

@staticmethod
def transform_y(y_train):
return y_train

@staticmethod
def inverse_transform_y(output):
return output
@@ -295,6 +296,7 @@ class SingleModelSupervised(Supervised):
graph: DEFINED IN __init__() BUT PURPOSE UNCLEAR
data_transformer: DEFINED IN __init__() BUT PURPOSE UNCLEAR
"""

def __init__(self, verbose=False, path=None):
"""Initialize the instance of the SingleModelSupervised class.
@@ -349,11 +351,7 @@ def predict(self, x_test):
model = self.graph.produce_model()
model.eval()

outputs = []
with torch.no_grad():
for index, inputs in enumerate(test_loader):
outputs.append(model(inputs).numpy())
output = reduce(lambda x, y: np.concatenate((x, y)), outputs)
output = Backend.predict(model, test_loader)
return self.inverse_transform_y(output)

def evaluate(self, x_test, y_test):
@@ -397,6 +395,7 @@ class PortableDeepSupervised(SingleModelSupervised, ABC):
path: A string value indicating the path to the directory where the intermediate model results
are stored
"""

def __init__(self, graph, y_encoder, data_transformer, verbose=False, path=None):
"""Initialize the instance of the PortableDeepSupervised class.
@@ -22,15 +22,13 @@
import torch

from autokeras.backend import Backend
from autokeras.backend.torch.loss_function import classification_loss
from autokeras.nn.metric import Accuracy
from autokeras.backend.torch.model_trainer import BERTTrainer
from autokeras.supervised import SingleModelSupervised
from autokeras.text.pretrained_bert.utils import PYTORCH_PRETRAINED_BERT_CACHE
from autokeras.text.pretrained_bert.modeling import BertForSequenceClassification
from autokeras.text.pretrained_bert.utils import convert_examples_to_features
from autokeras.text.pretrained_bert.tokenization import BertTokenizer
from autokeras.utils import get_device
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler


@@ -53,7 +51,7 @@ def __init__(self, verbose, **kwargs):
verbose: Mode of verbosity.
"""
super().__init__(**kwargs)
self.device = get_device()
self.device = Backend.get_device()
self.verbose = verbose

# BERT specific
def pickle_to_file(obj, path):
    """Serialize ``obj`` to the file at ``path`` using pickle.

    Args:
        obj: Any picklable Python object.
        path: Destination file path (opened in binary write mode).
    """
    # Bug fix: the original `pickle.dump(obj, open(path, 'wb'))` leaked the
    # file handle; the context manager closes it even if pickle.dump raises.
    with open(path, 'wb') as f:
        pickle.dump(obj, f)


def get_device():
    """If CUDA is available, use CUDA device, else use CPU device.

    Returns: string device name
    """
    device = 'cpu'
    if torch.cuda.is_available():
        device = 'cuda'
    return device


# # TODO cannot detect nvidia-smi in Windows normally. We need a fall back for windows
# def get_device():
# """ If CUDA is available, use CUDA device, else use CPU device.
# When choosing from CUDA devices, this function will choose the one with max memory available.
# Returns: string device name.
# """
# # TODO: could use gputil in the future
# device = 'cpu'
# if torch.cuda.is_available():
# try:
# # smi_out=
# # Free : xxxxxx MiB
# # Free : xxxxxx MiB
# # ....
# smi_out = subprocess.check_output('nvidia-smi -q -d Memory | grep -A4 GPU|grep Free', shell=True)
# if isinstance(smi_out, bytes):
# smi_out = smi_out.decode('utf-8')
# except subprocess.SubprocessError:
# warnings.warn('Cuda device successfully detected. However, nvidia-smi cannot be invoked')
# return 'cpu'
# visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '').split(',')
# if len(visible_devices) == 1 and visible_devices[0] == '':
# visible_devices = []
# visible_devices = [int(x) for x in visible_devices]
# memory_available = [int(x.split()[2]) for x in smi_out.splitlines()]
# for cuda_index, _ in enumerate(memory_available):
# if cuda_index not in visible_devices and visible_devices:
# memory_available[cuda_index] = 0
# memory_available = list(filter(lambda a: a != 2, memory_available))
# if memory_available:
# if max(memory_available) != 0 and torch.cuda.device_count() == 1:
# device = 'cuda:' + str(memory_available.index(max(memory_available)))
# elif max(memory_available) != 0 and torch.cuda.device_count() > 1:
# device = 'cuda:0'
# return device


def temp_path_generator():
sys_temp = tempfile.gettempdir()

0 comments on commit 3906aec

Please sign in to comment.
You can’t perform that action at this time.