Commit 35d86a7: change readme

TsumiNa committed Jan 29, 2018
1 parent 742df44 commit 35d86a7
Showing 14 changed files with 191 additions and 37 deletions.
1 change: 0 additions & 1 deletion .travis.yml
@@ -18,7 +18,6 @@ env:
 before_install:
 - mkdir -p ~/.xenonpy/dataset
 - mkdir -p ~/.xenonpy/cached
-- cp travis/*.pkl.pd_ ~/.xenonpy/dataset/
 - ls ~/.xenonpy
 - cp travis/matplotlibrc_${BACKEND} matplotlibrc
 - if [ $BACKEND == "qtagg" ]; then
File renamed without changes.
File renamed without changes.
@@ -1,10 +1,10 @@
-name: xepy36
+name: xepy35
 channels:
 - defaults
 - peterjc123
 - matsci
 dependencies:
-- python=3.6
+- python=3.5
 - numpy
 - pandas
 - tqdm
@@ -17,6 +17,12 @@ dependencies:
 - scipy
 - pytorch
 - pyyaml
+- pytest
+- pytest-cov
+- pylint
+- sphinx
+- sphinx_rtd_theme
+- jupyter
 - pip:
   - pymonad
   - matminer
File renamed without changes.
File renamed without changes.
File renamed without changes.
96 changes: 96 additions & 0 deletions random_test.ipynb
@@ -0,0 +1,96 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sequential(\n",
" (0): Layer1d(\n",
" (neuron): Linear(in_features=290, out_features=100)\n",
" (batch_nor): BatchNorm1d(100, eps=0.5, momentum=0.1, affine=True)\n",
" (act_func): ReLU()\n",
" (dropout): Dropout(p=0.3)\n",
" )\n",
" (1): Layer1d(\n",
" (neuron): Linear(in_features=100, out_features=50)\n",
" (batch_nor): BatchNorm1d(50, eps=0.30000000000000004, momentum=0.1, affine=True)\n",
" (act_func): ReLU()\n",
" (dropout): Dropout(p=0.06)\n",
" )\n",
" (2): Layer1d(\n",
" (neuron): Linear(in_features=50, out_features=50)\n",
" (batch_nor): BatchNorm1d(50, eps=0.30000000000000004, momentum=0.1, affine=True)\n",
" (act_func): ReLU()\n",
" (dropout): Dropout(p=0.21999999999999995)\n",
" )\n",
" (3): Layer1d(\n",
" (neuron): Linear(in_features=50, out_features=100)\n",
" (batch_nor): BatchNorm1d(100, eps=0.4, momentum=0.1, affine=True)\n",
" (act_func): ReLU()\n",
" (dropout): Dropout(p=0.41999999999999993)\n",
" )\n",
" (4): Layer1d(\n",
" (neuron): Linear(in_features=100, out_features=100)\n",
" (batch_nor): BatchNorm1d(100, eps=0.2, momentum=0.1, affine=True)\n",
" (act_func): ReLU()\n",
" (dropout): Dropout(p=0.3)\n",
" )\n",
" (5): Layer1d(\n",
" (neuron): Linear(in_features=100, out_features=1)\n",
" )\n",
")\n"
]
}
],
"source": [
"from xenonpy.model.nn import NNGenerator1d\n",
"import numpy as np\n",
"\n",
"g = NNGenerator1d(290, 1, n_neuron=[100, 50, 30], p_drop=np.arange(0.02, 0.5, 0.04),\n",
" batch_normalize=[True], momentum=np.arange(0.1, 0.6, 0.1))\n",
"m = g(hidden=5)\n",
"print(m)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
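For readers without XenonPy at hand, the idea the notebook exercises can be sketched in a few lines of plain PyTorch: draw random Linear/BatchNorm/ReLU/Dropout blocks from candidate hyperparameters and chain each block's output width into the next. The names and structure below are illustrative only, not XenonPy's API — the real NNGenerator1d appears in the base_model.py diff further down.

from typing import Tuple

import numpy as np
import torch.nn as nn

# Candidate hyperparameters, mirroring the notebook call above.
N_NEURON = [100, 50, 30]
P_DROP = np.arange(0.02, 0.5, 0.04)


def random_block(n_in: int) -> Tuple[nn.Sequential, int]:
    """Draw one hidden block; return it together with its output width."""
    n_out = int(np.random.choice(N_NEURON))
    p = float(np.random.choice(P_DROP))
    block = nn.Sequential(
        nn.Linear(n_in, n_out),
        nn.BatchNorm1d(n_out),
        nn.ReLU(),
        nn.Dropout(p),
    )
    return block, n_out


def sample_model(n_features=290, n_predict=1, hidden=5):
    blocks, n_in = [], n_features
    for _ in range(hidden):
        block, n_in = random_block(n_in)
        blocks.append(block)
    blocks.append(nn.Linear(n_in, n_predict))  # output layer, no activation
    return nn.Sequential(*blocks)


print(sample_model())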
1 change: 0 additions & 1 deletion setup.py
@@ -154,7 +154,6 @@ def get_requirements(filename: str):
 
 # Read requirements.txt, ignore comments
 REQUIRES = get_requirements("requirements.txt")
-REQUIRES_TEST = get_requirements("requirements_test.txt")
 
 setup(
     name=PKG_NAME,
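The `get_requirements` helper named in the hunk header is defined above this hunk and is not shown in the diff. Going by the comment ("Read requirements.txt, ignore comments"), a plausible sketch of such a helper — an assumption, not the code actually in setup.py — would be:

def get_requirements(filename: str):
    """Return non-blank, non-comment lines of a pip requirements file (illustrative sketch)."""
    with open(filename) as f:
        lines = (line.strip() for line in f)
        return [line for line in lines if line and not line.startswith('#')]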
Binary file removed travis/elements.pkl.pd_
Binary file removed travis/elements_completed.pkl.pd_
98 changes: 70 additions & 28 deletions xenonpy/model/nn/base_model.py
@@ -2,11 +2,13 @@
 # Use of this source code is governed by a BSD-style
 # license that can be found in the LICENSE file.
 
-from pathlib import Path
+from itertools import product
 
 import numpy as np
 import torch as tc
 import torch.nn as nn
+from collections import namedtuple
+from pathlib import Path
 from sklearn.base import BaseEstimator, RegressorMixin
 from torch.autograd import Variable as V
 
@@ -18,17 +20,45 @@
 
 
 class NNGenerator1d(object):
+    """
+    Generate random models from the supplied parameters.
+    """
+
     def __init__(self, n_features: int, n_predict: int, *,
                  n_neuron: [int],
                  p_drop: [float] = (0.0,),
                  layer_func: [] = (nn.Linear,),
                  act_func: [] = (nn.ReLU(),),
+                 lr: [float] = (None,),
                  batch_normalize: [bool] = (False,),
                  momentum: [float] = (0.1,)
                  ):
+        """
+        Parameters
+        ----------
+        n_features: int
+            Input dimension.
+        n_predict: int
+            Output dimension.
+        n_neuron: int-list
+            Numbers of neurons.
+        p_drop: float-list
+            Dropout rates.
+        layer_func: func-list
+            Layer functions, e.g. :class:`torch.nn.Linear`.
+        act_func: func-list
+            Activation functions, e.g. :class:`torch.nn.ReLU`.
+        lr: float-list
+            Learning rates.
+        batch_normalize: bool-list
+            Batch normalization switches; see :class:`torch.nn.BatchNorm1d`.
+        momentum: float-list
+            Values used for the running_mean and running_var computation.
+        """
         self.n_in, self.n_out = n_features, n_predict
 
         # save parameters
+        self.lr = lr
         self.n_neuron = n_neuron
         self.p_drop = p_drop
         self.layer_func = layer_func
@@ -38,38 +38,50 @@ def __init__(self, n_features: int, n_predict: int, *,
 
         # prepare layer container
         # calculate layer's variety
-        self.layers = list()
-        self.layers_len = 0
-        self.__lay_vars()
-
-    def __lay_vars(self):
-        for n in self.n_neuron:
-            for p in self.p_drop:
-                for l in self.layer_func:
-                    for a in self.act_func:
-                        for b in self.batch_normalize:
-                            for m in self.momentum:
-                                layer = dict(n_in=0,
-                                             n_out=n,
-                                             p_drop=p,
-                                             layer_func=l,
-                                             act_func=a,
-                                             batch_normalize=b,
-                                             momentum=m)
-                                self.layers.append(layer)
-        self.layers_len = len(self.layers)
-
-    def __call__(self, hidden=3):
-        ids = np.random.randint(0, self.layers_len, hidden)
+        self.layer_var = list(product(n_neuron, p_drop, layer_func, act_func, batch_normalize, momentum, lr))
+
+    def __call__(self, hidden: int = 3, n_sample: int = 0, scheduler=None):
+        """
+        Generate sample models.
+
+        Parameters
+        ----------
+        hidden: int
+            Number of hidden layers.
+        n_sample: int
+            Number of model samples.
+        scheduler:
+            A function used to determine a layer's properties from the previous layer.
+
+            >>> # index: layer index in a model; pars: parameters of previous layer as dict.
+            >>> # include: n_neuron, p_drop, layer_func, act_func, lr, batch_normalize, momentum
+            >>> scheduler = lambda index, pars: pars
+
+        Returns
+        -------
+        ret: iterable
+            Samples as a generator.
+        """
+        layer_paras = namedtuple('LayerParas',
+                                 ['n_in',
+                                  'n_out',
+                                  'p_drop',
+                                  'layer_func',
+                                  'act_func',
+                                  'batch_normalize',
+                                  'momentum',
+                                  'lr']
+                                 )
+        layers_len = len(self.layer_var)
+        ids = np.random.randint(0, layers_len, hidden)
         layers = list()
 
         # set layers
-        self.layers[ids[0]]['n_in'] = self.n_in
         n_in = self.n_in
         for i in ids:
-            self.layers[i]['n_in'] = n_in
-            layers.append(Layer1d(**self.layers[i]))
-            n_in = self.layers[i]['n_out']
+            layer = (n_in,) + self.layer_var[i]
+            layers.append(Layer1d(**(layer_paras(*layer)._asdict())))
+            n_in = self.layer_var[i][0]
         out_layer = Layer1d(n_in=n_in, n_out=self.n_out, act_func=None)
         layers.append(out_layer)
 
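The core of this refactor: six nested loops become a single `itertools.product` over every hyperparameter combination (now including `lr`), and a sampled tuple is turned back into keyword arguments through a namedtuple's `_asdict()`. A standalone illustration of that pattern, with simplified made-up parameters rather than XenonPy's real ones:

from collections import namedtuple
from itertools import product

import numpy as np

# Enumerate every combination of candidate hyperparameters up front.
layer_var = list(product([100, 50, 30],    # n_out
                         [0.0, 0.1, 0.3],  # p_drop
                         [True, False]))   # batch_normalize

LayerParas = namedtuple('LayerParas', ['n_in', 'n_out', 'p_drop', 'batch_normalize'])

# Sample three layers, chaining n_in to the previous layer's n_out.
ids = np.random.randint(0, len(layer_var), 3)
n_in = 290
for i in ids:
    kwargs = LayerParas(n_in, *layer_var[i])._asdict()
    print(kwargs)           # ready to splat into a layer constructor: Layer1d(**kwargs)
    n_in = layer_var[i][0]  # chain: next layer's input = this layer's n_out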
22 changes: 17 additions & 5 deletions xenonpy/model/nn/layer.py
@@ -6,16 +6,16 @@
 
 
 class Layer1d(nn.Module):
-    def __init__(self, *,
-                 n_in: int,
-                 n_out: int,
+    def __init__(self, n_in: int, n_out: int, *,
                  p_drop: float = 0.0,
                  layer_func=nn.Linear,
                  act_func=nn.ReLU(),
-                 batch_normalize=False,
-                 momentum=0.1
+                 batch_normalize: bool = False,
+                 momentum: float = 0.1,
+                 lr: float = None
                  ):
         super().__init__()
+        self.lr = lr
         self.neuron = layer_func(n_in, n_out)
         self.batch_nor = None if not batch_normalize else nn.BatchNorm1d(n_out, momentum)
         self.act_func = None if not act_func else act_func
@@ -31,3 +31,15 @@ def forward(self, x):
             _out = self.act_func(_out)
         return _out
 
+
+if __name__ == '__main__':
+    model = nn.Sequential(
+        nn.Linear(2, 3),
+        # nn.ReLU(),
+        nn.Linear(3, 2),
+        # nn.ReLU()
+    )
+
+    params_dict = dict(model.named_parameters())
+    for key, value in params_dict.items():
+        print("key:{}, value:{}".format(key, value))

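One detail worth flagging in this file: `nn.BatchNorm1d(n_out, momentum)` passes `momentum` as the second positional argument, but BatchNorm1d's signature is `BatchNorm1d(num_features, eps=1e-05, momentum=0.1, affine=True)`, so the sampled value lands in `eps` while `momentum` keeps its default. The notebook output above shows exactly this (`eps=0.5, momentum=0.1`, and so on). A keyword argument would route the value as presumably intended; a small demonstration of the difference (a suggested fix, not part of this commit):

import torch.nn as nn

n_out, momentum = 100, 0.5

# As committed: the value fills the eps slot.
print(nn.BatchNorm1d(n_out, momentum))           # eps=0.5, momentum=0.1 — matches the notebook
# Presumably intended: pass momentum by keyword.
print(nn.BatchNorm1d(n_out, momentum=momentum))  # eps=1e-05, momentum=0.5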