Skip to content
This repository has been archived by the owner on Apr 27, 2023. It is now read-only.

Commit

Permalink
fix pydocstyle
Browse files Browse the repository at this point in the history
  • Loading branch information
Chi Chen committed Jul 27, 2020
1 parent c5afd99 commit 85bc586
Show file tree
Hide file tree
Showing 19 changed files with 599 additions and 148 deletions.
3 changes: 3 additions & 0 deletions megnet/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,4 @@
"""
MatErials Graph Network (MEGNet) package
"""
__version__ = "1.1.8"
3 changes: 3 additions & 0 deletions megnet/activations.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
"""
Activation functions used in neural networks
"""
from typing import Callable, Any

import tensorflow.keras.backend as kb
Expand Down
52 changes: 29 additions & 23 deletions megnet/callbacks.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
"""
Callback functions used in the training process
"""
import logging
import os
import re
Expand All @@ -22,20 +25,6 @@
class ModelCheckpointMAE(Callback):
"""
Save the best MAE model with target scaler
Args:
filepath (string): path to save the model file with format. For example
`weights.{epoch:02d}-{val_mae:.6f}.hdf5` will save the corresponding epoch and
val_mae in the filename
monitor (string): quantity to monitor, default to "val_mae"
verbose (int): 0 for no training log, 1 for only epoch-level log and 2 for batch-level log
save_best_only (bool): whether to save only the best model
save_weights_only (bool): whether to save the weights only excluding model structure
val_gen (generator): validation generator
steps_per_val (int): steps per epoch for validation generator
target_scaler (object): exposing inverse_transform method to scale the output
period (int): number of epoch interval for this callback
mode: (string) choose from "min", "max" or "auto"
"""

def __init__(self,
Expand All @@ -49,6 +38,21 @@ def __init__(self,
target_scaler: Scaler = None,
period: int = 1,
mode: str = 'auto'):
"""
Args:
filepath (string): path to save the model file with format. For example
`weights.{epoch:02d}-{val_mae:.6f}.hdf5` will save the corresponding epoch and
val_mae in the filename
monitor (string): quantity to monitor, default to "val_mae"
verbose (int): 0 for no training log, 1 for only epoch-level log and 2 for batch-level log
save_best_only (bool): whether to save only the best model
save_weights_only (bool): whether to save the weights only excluding model structure
val_gen (generator): validation generator
steps_per_val (int): steps per epoch for validation generator
target_scaler (object): exposing inverse_transform method to scale the output
period (int): number of epoch interval for this callback
mode: (string) choose from "min", "max" or "auto"
"""
super().__init__()
if val_gen is None:
raise ValueError('No validation data is provided!')
Expand Down Expand Up @@ -167,15 +171,6 @@ class ReduceLRUponNan(Callback):
It has an extra patience mechanism for early stopping.
This will be moved to an independent callback in the future.
Args:
filepath (str): filepath for saved model checkpoint, should be consistent with
checkpoint callback
factor (float): a value < 1 for scaling the learning rate
verbose (bool): whether to show the loading event
patience (int): number of steps that the val mae does not change.
It is a criteria for early stopping
monitor (str): target metric to monitor
mode (str): min, max or auto
"""

def __init__(self,
Expand All @@ -185,6 +180,17 @@ def __init__(self,
patience: int = 500,
monitor: str = 'val_mae',
mode: str = 'auto'):
"""
Args:
filepath (str): filepath for saved model checkpoint, should be consistent with
checkpoint callback
factor (float): a value < 1 for scaling the learning rate
verbose (bool): whether to show the loading event
patience (int): number of steps that the val mae does not change.
It is a criteria for early stopping
monitor (str): target metric to monitor
mode (str): min, max or auto
"""
self.filepath = filepath
self.verbose = verbose
self.factor = factor
Expand Down
4 changes: 4 additions & 0 deletions megnet/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,10 @@


class DataType:
"""
Data types for tensorflow. This enables users to choose
from 32-bit float and int, and 16-bit float and int
"""
np_float = np.float32
np_int = np.int32
tf_float = tf.float32
Expand Down
24 changes: 24 additions & 0 deletions megnet/data/crystal.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,14 @@ def __init__(self,
bond_converter: Converter = None,
cutoff: float = 5.0
):
"""
Convert the structure into crystal graph
Args:
nn_strategy (str): NearNeighbor strategy
atom_converter (Converter): atom features converter
bond_converter (Converter): bond features converter
cutoff (float): cutoff radius
"""
self.cutoff = cutoff
super().__init__(nn_strategy=nn_strategy, atom_converter=atom_converter,
bond_converter=bond_converter, cutoff=self.cutoff)
Expand All @@ -46,10 +54,26 @@ def __init__(self,
nn_strategy: Union[str, NearNeighbors] = 'VoronoiNN',
atom_converter: Converter = None,
bond_converter: Converter = None):
"""
Args:
nn_strategy (str): NearNeighbor strategy
atom_converter (Converter): atom features converter
bond_converter (Converter): bond features converter
"""
super().__init__(nn_strategy=nn_strategy, atom_converter=atom_converter,
bond_converter=bond_converter)

def convert(self, structure: Structure, state_attributes: List = None) -> Dict:
    """
    Convert a pymatgen Structure into a graph dictionary with bond types.

    Args:
        structure (Structure): pymatgen Structure to convert
        state_attributes (list): optional state attributes for the graph

    Returns:
        dict: graph dictionary augmented with bond-type information
    """
    base_graph = super().convert(structure, state_attributes=state_attributes)
    return self._get_bond_type(base_graph)

Expand Down

0 comments on commit 85bc586

Please sign in to comment.