Skip to content

Commit

Permalink
for travis
Browse files Browse the repository at this point in the history
  • Loading branch information
dingguanglei committed Nov 21, 2018
1 parent 3fab65c commit cd19884
Show file tree
Hide file tree
Showing 4 changed files with 22 additions and 47 deletions.
14 changes: 8 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -71,12 +71,14 @@ training and valid_epoch data, configures info and checkpoint were save in `log`
processing data and configures.

Although it is just an example, you still can build your own project
easily by using jdit framework. Jdit framework can deal with \* Data
visualization. (learning curves, images in pilot process) \* CPU, GPU or
GPUs. (Training your model on specify devices) \* Intermediate data
storage. (Saving training data into a csv file) \* Model checkpoint
automatically. \* Flexible templates can be used to integrate and custom
overrides. So, let's see what is **jdit**.
easily by using jdit framework. Jdit framework can deal with
* Data visualization. (learning curves, images in pilot process)
* CPU, GPU or GPUs. (Training your model on specified devices)
* Intermediate data storage. (Saving training data into a csv file)
* Model checkpoint automatically.
* Flexible templates can be used for integration and custom overrides.

So, let's see what **jdit** is and build your own project.

## Build your own trainer

Expand Down
1 change: 0 additions & 1 deletion jdit/unittest/test_loger.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@

class TestLoger(TestCase):
def test_regist_config(self):
pass
log = Loger()
param = torch.nn.Linear(10, 1)
opt = Optimizer(param.parameters(), lr=0.999, weight_decay=0.03, momentum=0.5, betas=(0.1, 0.4),
Expand Down
6 changes: 0 additions & 6 deletions jdit/unittest/test_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,30 +7,25 @@

class TestModel(TestCase):
def setUp(self):
pass
self.mode = Sequential(Conv2d(10, 1, 3, 1, 0))
self.epoch = 32

def test_define(self):
pass
net = Model()
assert net.model is None
net.define(self.mode, [], "kaiming", show_structure=False)
assert net.model is not None

def test_print_network(self):
pass
net = Model(self.mode, show_structure=False)
assert net.model is not None

def test_weightsInit(self):
pass
net = Model()
net.init_fc = init.kaiming_normal_
self.mode.apply(net._weight_init)

def test_loadModel(self):
pass
print(self.mode)
net = Model(self.mode, show_structure=False)
net.check_point("tm", self.epoch, "test_model")
Expand All @@ -41,7 +36,6 @@ def test_loadModel(self):


def test_loadPoint(self):
pass
net = Model(self.mode, show_structure=False)
net.check_point("tm", self.epoch, "test_model")
net.load_point("tm", self.epoch, "test_model")
Expand Down
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
from unittest import TestCase
from ..parallel import SupParallelTrainer


class TestSuperTrainerParallel(TestCase):
class TestSupParallelTrainer(TestCase):
def setUp(self):
pass
self.default_params = {'data_root': r"datasets/fashion_data",
'gpu_ids_abs': [],
'depth': 4,
Expand Down Expand Up @@ -59,11 +57,14 @@ def setUp(self):
self.trainers_list = ["Trainer_A", "Trainer_B", "Trainer_C", "Trainer_D"]
self.pt = SupParallelTrainer(self.default_params, self.unfixed_params)

def test_build_task_trainer(self):
self.fail()

def test_train(self):
pass
self.fail()

def test__startTrain(self):
pass
def test__start_train(self):
self.fail()

def test__distribute_task_on_devices(self):
pass
Expand Down Expand Up @@ -105,33 +106,7 @@ def test__distribute_task_on_devices(self):
gpu_used_plan = self.pt._distribute_task_on_devices(candidate_params_list)
self.assertEqual(real_gpu_used_plan, gpu_used_plan)

def test__get_gpu_ids_abs(self):
pass
candidate_params = [
{'gpu_ids_abs': [],
'logdir': r"log/tresnet_24d_16m_1"
},
{'gpu_ids_abs': [1, 2],
'logdir': r"log/tresnet_24d_16m_1"
},
{'gpu_ids_abs': [1],
'logdir': r"log/tresnet_24d_16m_1"
}
]
gpu_ids_abs = self.pt._get_gpu_ids_abs(candidate_params)
self.assertEqual(gpu_ids_abs, [[], [1, 2], [1]])

def test__check_overlap(self):
pass
gpuids_tuple = self.pt._check_overlap(self.candidate_gpu_ids_abs_list)
self.assertEqual(((), (1, 2), (1, 2), (3, 4)), gpuids_tuple)
gpuids_tuple = self.pt._check_overlap(([], [], [1, 2], [3, 4], [5], [6]))
self.assertEqual(((), (), (1, 2), (3, 4), (5,), (6,)), gpuids_tuple)
gpuids_tuple = self.pt._check_overlap(([1, 2], [6, 3]))
self.assertEqual(((1, 2), (6, 3)), gpuids_tuple)

def test__build_candidate_params(self):
pass
default_params = {'gpu_ids_abs': [],
'depth': 4,
'logdir': r"log/tresnet_24d_16m_1"}
Expand All @@ -148,7 +123,6 @@ def test__build_candidate_params(self):
self.assertEqual(candidate_params, total_params, "not equal!")

def test__add_logdirs_to_unfixed_params(self):
pass
unfixed_params = [
{'depth': 1, 'gpu_ids_abs': []},
{'depth': 2, 'gpu_ids_abs': [1, 2]}
Expand All @@ -161,7 +135,13 @@ def test__add_logdirs_to_unfixed_params(self):
self.assertEqual(final_unfixed_params, test_final_unfixed_params_list)

def test__convert_to_dirname(self):
pass
self.assertEqual(self.pt._convert_to_dirname("abc"), "abc")
self.assertEqual(self.pt._convert_to_dirname("123_abc_abc****"), "123_abc_abc")
self.assertEqual(self.pt._convert_to_dirname("*<>,/\\:?|abc"), "smallergreater___%$-abc")

def test_finish(self):
self.fail()

def test_error(self):
self.fail()

0 comments on commit cd19884

Please sign in to comment.