Commit c48e5de
Merge e821018 into 5812026
2 parents: 5812026 + e821018
toodef committed Jul 15, 2020

Showing 35 changed files with 1,289 additions and 1,361 deletions.
4 changes: 2 additions & 2 deletions examples/files/img_classification.py
@@ -1,5 +1,5 @@
-from neural_pipeline.builtin.monitors.tensorboard import TensorboardMonitor
-from neural_pipeline import DataProducer, AbstractDataset, TrainConfig, TrainStage,\
+from piepline.builtin.monitors.tensorboard import TensorboardMonitor
+from piepline import DataProducer, AbstractDataset, TrainConfig, TrainStage,\
     ValidationStage, Trainer, FileStructManager
 
 import torch
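The whole change in this example is the package rename from neural_pipeline to piepline. For code that has to run against either release line, a hedged compatibility shim might look like this (a sketch; it assumes the module layout is otherwise unchanged, as this diff suggests):

    # Prefer the renamed package, fall back to the old one.
    try:
        from piepline import DataProducer, TrainConfig, Trainer  # piepline >= 0.3.0
    except ImportError:
        from neural_pipeline import DataProducer, TrainConfig, Trainer  # neural_pipeline <= 0.2.2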
20 changes: 11 additions & 9 deletions examples/files/img_segmentation.py
@@ -20,13 +20,13 @@
 from albumentations import Compose, HorizontalFlip, VerticalFlip, RandomRotate90, RandomGamma, \
     RandomBrightnessContrast, RGBShift, Resize, RandomCrop, OneOf
 
-from neural_pipeline import Trainer
-from neural_pipeline.builtin.models.albunet import resnet18
-from neural_pipeline.data_producer import AbstractDataset, DataProducer
-from neural_pipeline.monitoring import LogMonitor
-from neural_pipeline.train_config import AbstractMetric, MetricsProcessor, MetricsGroup, TrainStage, ValidationStage, TrainConfig
-from neural_pipeline.utils.fsm import FileStructManager
-from neural_pipeline.builtin.monitors.tensorboard import TensorboardMonitor
+from piepline import Trainer
+from piepline.builtin.models.albunet import resnet18
+from piepline.data_producer import AbstractDataset, DataProducer
+from piepline.monitoring import LogMonitor
+from piepline.train_config import AbstractMetric, MetricsProcessor, MetricsGroup, TrainStage, ValidationStage, TrainConfig
+from piepline.utils.fsm import FileStructManager
+from piepline.builtin.monitors.tensorboard import TensorboardMonitor
 
 ###################################
 # Define dataset and augmentations
@@ -119,15 +119,17 @@ class DiceMetric(AbstractMetric):
     def __init__(self):
         super().__init__('dice')
 
-    def calc(self, output: torch.Tensor, target: torch.Tensor) -> np.ndarray or float:
+    @staticmethod
+    def calc(output: torch.Tensor, target: torch.Tensor) -> np.ndarray or float:
         return dice(output, target)
 
 
 class JaccardMetric(AbstractMetric):
     def __init__(self):
         super().__init__('jaccard')
 
-    def calc(self, output: torch.Tensor, target: torch.Tensor) -> np.ndarray or float:
+    @staticmethod
+    def calc(output: torch.Tensor, target: torch.Tensor) -> np.ndarray or float:
        return jaccard(output, target)
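Making calc a @staticmethod is sound here because the metric uses no instance state. The dice() and jaccard() helpers are defined elsewhere in img_segmentation.py and are not part of this diff; a minimal sketch of what such helpers conventionally compute (an assumption, the project's real implementations may differ):

    import torch

    def dice(output: torch.Tensor, target: torch.Tensor, eps: float = 1e-7) -> float:
        # Soft Dice: 2|A∩B| / (|A| + |B|), flattened over all elements
        output, target = output.reshape(-1).float(), target.reshape(-1).float()
        intersection = (output * target).sum()
        return float((2 * intersection + eps) / (output.sum() + target.sum() + eps))

    def jaccard(output: torch.Tensor, target: torch.Tensor, eps: float = 1e-7) -> float:
        # Jaccard (IoU): |A∩B| / |A∪B|
        output, target = output.reshape(-1).float(), target.reshape(-1).float()
        intersection = (output * target).sum()
        union = output.sum() + target.sum() - intersection
        return float((intersection + eps) / (union + eps))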
12 changes: 6 additions & 6 deletions examples/files/resume_train.py
@@ -5,12 +5,12 @@
 import torch
 import numpy as np
 
-from neural_pipeline import Trainer
-from neural_pipeline.builtin.models.albunet import resnet18
-from neural_pipeline.builtin.monitors.tensorboard import TensorboardMonitor
-from neural_pipeline.monitoring import LogMonitor
-from neural_pipeline.train_config import TrainConfig
-from neural_pipeline.utils import FileStructManager
+from piepline import Trainer
+from piepline.builtin.models.albunet import resnet18
+from piepline.builtin.monitors.tensorboard import TensorboardMonitor
+from piepline.monitoring import LogMonitor
+from piepline.train_config import TrainConfig
+from piepline.utils import FileStructManager
 
 from examples.files.img_segmentation import train_stage, val_stage
20 changes: 12 additions & 8 deletions piepline/__init__.py
@@ -1,9 +1,13 @@
-__version__ = '0.2.2'
+__version__ = '0.3.0'
 
-from .data_producer import *
-from .data_processor import *
-from .train_config import *
-from .utils import *
-from .monitoring import MonitorHub, AbstractMonitor, ConsoleMonitor
-from .train import Trainer
-from .predict import Predictor
+from piepline.utils.events_system import EventsContainer
+
+events_container = EventsContainer()
+
+# from .data_producer import *
+# from .data_processor import *
+# from .train_config import *
+# from .utils import *
+# from .monitoring import *
+# from .train import Trainer
+# from .predict import Predictor
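With this change the package root exports a module-level EventsContainer singleton while the old star re-exports are disabled. Client code can reach the shared container directly (a sketch; EventsContainer's own API lives in piepline.utils.events_system and is not shown in this diff):

    import piepline

    print(piepline.__version__)                      # '0.3.0'
    print(type(piepline.events_container).__name__)  # 'EventsContainer'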
5 changes: 3 additions & 2 deletions piepline/builtin/monitors/mpl.py
@@ -14,8 +14,8 @@
 
 import numpy as np
 
-from piepline import AbstractMonitor
-from piepline.train_config import MetricsGroup
+from piepline.monitoring.monotors import AbstractMonitor
+from piepline.train_config.metrics import MetricsGroup
 
 
 class MPLMonitor(AbstractMonitor):
@@ -98,6 +98,7 @@ def realtime(self, is_realtime: bool) -> 'MPLMonitor':
         :return: self object
         """
         self._realtime = is_realtime
+        return self
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         plt.show()
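The added return self is what makes the documented fluent style work: realtime() is annotated as returning 'MPLMonitor' but previously fell through and returned None. A usage sketch (the no-argument constructor is an assumption; check MPLMonitor's actual signature):

    # Chaining now works because realtime() returns the monitor itself.
    monitor = MPLMonitor().realtime(False)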
81 changes: 42 additions & 39 deletions piepline/builtin/monitors/tensorboard.py
@@ -4,15 +4,18 @@
 
 import os
 import numpy as np
+from torch.nn import Module
 
 try:
     from tensorboardX import SummaryWriter
 except ImportError:
-    print("Looks like tensorboardX doesn't installed. Install in via 'pip install tensorboardX' and try again")
+    try:
+        from torch.utils.tensorboard import SummaryWriter
+    except ImportError:
+        print("Can't import tensorboard. Try to install tensorboardX or update PyTorch version")
 
-from piepline.monitoring import AbstractMonitor
-from piepline.data_processor import Model
-from piepline.train_config import AbstractMetric, MetricsGroup
+from piepline.monitoring.monotors import AbstractMonitor
+from piepline.train_config.metrics import AbstractMetric, MetricsGroup
 from piepline.utils.fsm import FileStructManager, FolderRegistrable
 
 import warnings
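The nested try/except prefers tensorboardX and falls back to the SummaryWriter bundled with newer PyTorch (torch.utils.tensorboard). Both backends expose the writer methods this monitor relies on, so downstream code is unaffected; a minimal standalone sketch of the same pattern:

    try:
        from tensorboardX import SummaryWriter
    except ImportError:
        from torch.utils.tensorboard import SummaryWriter  # PyTorch >= 1.1

    writer = SummaryWriter('logs/demo')            # hypothetical log directory
    writer.add_scalar('loss', 0.5, global_step=0)
    writer.close()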
@@ -32,27 +35,27 @@ class TensorboardMonitor(AbstractMonitor, FolderRegistrable):
 
     def __init__(self, fsm: FileStructManager, is_continue: bool, network_name: str = None):
         super().__init__()
-        self.__writer = None
-        self.__txt_log_file = None
+        self._writer = None
+        self._txt_log_file = None
 
         fsm.register_dir(self)
-        dir = fsm.get_path(self)
-        if dir is None:
+        directory = fsm.get_path(self)
+        if directory is None:
             return
 
-        dir = os.path.join(dir, network_name) if network_name is not None else dir
+        directory = os.path.join(directory, network_name) if network_name is not None else directory
 
-        if not (fsm.in_continue_mode() or is_continue) and os.path.exists(dir) and os.path.isdir(dir):
+        if not (fsm.in_continue_mode() or is_continue) and os.path.exists(directory) and os.path.isdir(directory):
             idx = 0
-            tmp_dir = dir + "_v{}".format(idx)
+            tmp_dir = directory + "_v{}".format(idx)
             while os.path.exists(tmp_dir) and os.path.isdir(tmp_dir):
                 idx += 1
-                tmp_dir = dir + "_v{}".format(idx)
-            dir = tmp_dir
+                tmp_dir = directory + "_v{}".format(idx)
+            directory = tmp_dir
 
-        os.makedirs(dir, exist_ok=True)
-        self.__writer = SummaryWriter(dir)
-        self.__txt_log_file = open(os.path.join(dir, "log.txt"), 'a' if is_continue else 'w')
+        os.makedirs(directory, exist_ok=True)
+        self._writer = SummaryWriter(directory)
+        self._txt_log_file = open(os.path.join(directory, "log.txt"), 'a' if is_continue else 'w')
 
     def update_metrics(self, metrics: {}) -> None:
         """
@@ -68,13 +71,13 @@ def update_losses(self, losses: {}) -> None:
         :param losses: losses values with keys 'train' and 'validation'
         """
-        if self.__writer is None:
+        if self._writer is None:
             return
 
         def on_loss(name: str, values: np.ndarray) -> None:
-            self.__writer.add_scalars('loss', {name: np.mean(values)}, global_step=self.epoch_num)
-            self.__writer.add_histogram('{}/loss_hist'.format(name), np.clip(values, -1, 1).astype(np.float32),
-                                        global_step=self.epoch_num, bins=np.linspace(-1, 1, num=11).astype(np.float32))
+            self._writer.add_scalars('loss', {name: np.mean(values)}, global_step=self.epoch_num)
+            self._writer.add_histogram('{}/loss_hist'.format(name), np.clip(values, -1, 1).astype(np.float32),
+                                       global_step=self.epoch_num, bins=np.linspace(-1, 1, num=11).astype(np.float32))
 
         self._iterate_by_losses(losses, on_loss)
@@ -88,28 +91,28 @@ def _update_metrics(self, metrics: [AbstractMetric], metrics_groups: [MetricsGroup]) -> None:
         def process_metric(cur_metric, parent_tag: str = None):
             def add_histogram(name: str, vals, step_num, bins):
                 try:
-                    self.__writer.add_histogram(name, vals, step_num, bins)
-                except:
+                    self._writer.add_histogram(name, vals, step_num, bins)
+                except Exception:
                     pass
 
             tag = lambda name: name if parent_tag is None else '{}/{}'.format(parent_tag, name)
 
             if isinstance(cur_metric, MetricsGroup):
                 for m in cur_metric.metrics():
                     if m.get_values().size > 0:
-                        self.__writer.add_scalars(tag(m.name()), {m.name(): np.mean(m.get_values())}, global_step=self.epoch_num)
+                        self._writer.add_scalars(tag(m.name()), {m.name(): np.mean(m.get_values())}, global_step=self.epoch_num)
                         add_histogram(tag(m.name()) + '_hist',
                                       np.clip(m.get_values(), m.min_val(), m.max_val()).astype(np.float32),
                                       self.epoch_num, np.linspace(m.min_val(), m.max_val(), num=11).astype(np.float32))
             else:
                 values = cur_metric.get_values().astype(np.float32)
                 if values.size > 0:
-                    self.__writer.add_scalar(tag(cur_metric.name()), float(np.mean(values)), global_step=self.epoch_num)
+                    self._writer.add_scalar(tag(cur_metric.name()), float(np.mean(values)), global_step=self.epoch_num)
                     add_histogram(tag(cur_metric.name()) + '_hist',
                                   np.clip(values, cur_metric.min_val(), cur_metric.max_val()).astype(np.float32),
                                   self.epoch_num, np.linspace(cur_metric.min_val(), cur_metric.max_val(), num=11).astype(np.float32))
 
-        if self.__writer is None:
+        if self._writer is None:
             return
 
         for metric in metrics:
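Tightening the bare except: to except Exception: is more than style: a bare except also swallows BaseException subclasses such as KeyboardInterrupt and SystemExit. A self-contained illustration of the difference:

    # except Exception does NOT catch KeyboardInterrupt/SystemExit,
    # so Ctrl-C and interpreter shutdown still propagate:
    try:
        raise KeyboardInterrupt
    except Exception:
        print('caught')   # never reached
    except BaseException:
        print('only a bare except (or BaseException) lands here')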
@@ -129,7 +132,7 @@ def update_scalar(self, name: str, value: float, epoch_idx: int = None) -> None:
         :param value: scalar value
         :param epoch_idx: epoch idx. If doesn't set - use last epoch idx stored in this class
         """
-        self.__writer.add_scalar(name, value, global_step=(epoch_idx if epoch_idx is not None else self.epoch_num))
+        self._writer.add_scalar(name, value, global_step=(epoch_idx if epoch_idx is not None else self.epoch_num))
 
     def write_to_txt_log(self, text: str, tag: str = None) -> None:
         """
@@ -138,29 +141,29 @@ def write_to_txt_log(self, text: str, tag: str = None) -> None:
         :param text: text that will be writed
         :param tag: tag
         """
-        self.__writer.add_text("log" if tag is None else tag, text, self.epoch_num)
+        self._writer.add_text("log" if tag is None else tag, text, self.epoch_num)
         text = "Epoch [{}]".format(self.epoch_num) + ": " + text
-        self.__txt_log_file.write(text + '\n')
-        self.__txt_log_file.flush()
+        self._txt_log_file.write(text + '\n')
+        self._txt_log_file.flush()
 
-    def visualize_model(self, model: Model, tensor) -> None:
+    def visualize_model(self, model: Module, tensor) -> None:
         """
         Visualize model graph
 
         :param model: :class:`torch.nn.Module` object
         :param tensor: dummy input for trace model
         """
-        self.__writer.add_graph(model, tensor)
+        self._writer.add_graph(model, tensor)
 
     def close(self):
-        if self.__txt_log_file is not None:
-            self.__txt_log_file.close()
-            self.__txt_log_file = None
-            del self.__txt_log_file
-        if self.__writer is not None:
-            self.__writer.close()
-            self.__writer = None
-            del self.__writer
+        if self._txt_log_file is not None:
+            self._txt_log_file.close()
+            self._txt_log_file = None
+            del self._txt_log_file
+        if self._writer is not None:
+            self._writer.close()
+            self._writer = None
+            del self._writer
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         self.close()
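Renaming self.__writer to self._writer swaps Python's per-class name mangling for the conventional single-underscore "protected" marker, so subclasses and mixins can reach the attribute. A small demonstration of the mangling that the old spelling incurred:

    class Base:
        def __init__(self):
            self.__writer = 'w'    # stored as _Base__writer
            self._log = 'l'

    class Child(Base):
        def probe(self):
            print(self._log)       # fine: prints 'l'
            print(self.__writer)   # AttributeError: no '_Child__writer'

    Child().probe()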
2 changes: 0 additions & 2 deletions piepline/data_processor/__init__.py
@@ -1,2 +0,0 @@
-from .data_processor import DataProcessor, TrainDataProcessor
-from .model import Model
