Skip to content

Commit

Permalink
fix typos (#193)
Browse files Browse the repository at this point in the history
  • Loading branch information
RainRat authored Apr 25, 2023
1 parent a665629 commit a7a14df
Show file tree
Hide file tree
Showing 28 changed files with 58 additions and 58 deletions.
4 changes: 2 additions & 2 deletions DeepCrazyhouse/configs/main_config_template.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,8 +46,8 @@

# The rec directory contains the plane representation which are used in the training loop of the network
# use the notebook create_rec_dataset to generate the .rec files:
# (Unfortunately when trying to start training with the big dataset a memory overflow occured.
# therfore the old working solution was used to train the latest model by loading the dataset via batch files)
# (Unfortunately when trying to start training with the big dataset a memory overflow occurred.
# therefore the old working solution was used to train the latest model by loading the dataset via batch files)
# "train.idx", "val.idx", "test.idx", "mate_in_one.idx", "train.rec", "val.rec", "test.rec", "mate_in_one.rec"
"rec_dir": "/home/demo_user/datasets/lichess/Crazyhouse/rec/",
# The architecture dir contains the architecture definition of the network in mxnet .symbol format
Expand Down
2 changes: 1 addition & 1 deletion DeepCrazyhouse/src/domain/abstract_cls/abs_game_state.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def get_pythonchess_board(self):
""" Force the child to implement get_pythonchess_board method"""

def is_draw(self):
""" Check if you can claim a draw - its assumed that the draw is always claimed """
""" Check if you can claim a draw - it's assumed that the draw is always claimed """
return self.board.can_claim_draw()

@abstractmethod
Expand Down
8 changes: 4 additions & 4 deletions DeepCrazyhouse/src/domain/agent/player/mcts_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -449,7 +449,7 @@ def _expand_root_node_single_move(self, state, legal_moves):
is_leaf = True
legal_moves_child = []
p_vec_small_child = None
# check if you can claim a draw - its assumed that the draw is always claimed
# check if you can claim a draw - it's assumed that the draw is always claimed
elif (
self.can_claim_threefold_repetition(state.get_transposition_key(), [0])
or state.get_pythonchess_board().can_claim_fifty_moves()
Expand Down Expand Up @@ -691,7 +691,7 @@ def _run_single_playout(self, parent_node: Node, pipe_id=0, depth=1, chosen_node
# establish a mate in one connection in order to stop exploring different alternatives
parent_node.set_check_mate_node_idx(child_idx)
# get the value from the leaf node (the current function is called recursively)
# check if you can claim a draw - its assumed that the draw is always claimed
# check if you can claim a draw - it's assumed that the draw is always claimed
elif (
self.can_claim_threefold_repetition(transposition_key, chosen_nodes)
or state.get_pythonchess_board().can_claim_fifty_moves() is True
Expand Down Expand Up @@ -766,7 +766,7 @@ def _run_single_playout(self, parent_node: Node, pipe_id=0, depth=1, chosen_node
def check_for_duplicate(self, transposition_key, chosen_nodes):
"""
:param transposition_key: Transposition key which defines the board state by all it's pieces and pocket state.
:param transposition_key: Transposition key which defines the board state by all its pieces and pocket state.
The move counter is disregarded.
:param chosen_nodes: List of moves which have been taken in the current path.
:return:
Expand All @@ -786,7 +786,7 @@ def can_claim_threefold_repetition(self, transposition_key, chosen_nodes):
Checks if a three fold repetition event can be claimed in the current search path.
This method makes use of the class transposition table and checks for board occurrences in the local search path
of the current thread as well.
:param transposition_key: Transposition key which defines the board state by all it's pieces and pocket state.
:param transposition_key: Transposition key which defines the board state by all its pieces and pocket state.
The move counter is disregarded.
:param chosen_nodes: List of integer indices which correspond to the child node indices chosen from the
root node downwards.
Expand Down
2 changes: 1 addition & 1 deletion DeepCrazyhouse/src/domain/agent/player/raw_net_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
@project: crazy_ara_refactor
@author: queensgambit
The raw network uses the the single network prediction for it's evaluation.
The raw network uses the single network prediction for its evaluation.
No mcts search is being done.
"""
from time import time
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,11 +33,11 @@ def __init__(
:param net: Neural Network API object which provides the reference for the neural network.
:param batch_size: Constant batch_size used for inference.
:param batch_state_planes: Shared numpy memory in which all threads set their state plane request for the
prediction service. Each threads has it's own channel.
prediction service. Each thread has its own channel.
:param batch_value_results: Shared numpy memory in which the value results of all threads are stored.
Each threads has it's own channel.
Each thread has its own channel.
:param batch_policy_results: Shared numpy memory in which the policy results of all threads are stored.
Each threads has it's own channel.
Each thread has its own channel.
"""
self.net = net
self.my_pipe_endings = pipe_endings
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -197,7 +197,7 @@ def policy_head(data, channels, act_type, channels_policy_head, select_policy_fr
:param n_labels: Number of possible move targets
:param grad_scale_policy: Optional re-weighting of gradient
:param use_se: Indicates if a squeeze excitation layer shall be used
:param no_bias: If no bias shall be used for the last conv layer before softmax (backward compability)
:param no_bias: If no bias shall be used for the last conv layer before softmax (backward compatibility)
"""
# for policy output
kernel = 3
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def mixture_net_symbol(channels=256, num_res_blocks=7, act_type='relu',
:param channels_value_head: Number of channels for the value head
:param value_fc_size: Number of units in the fully connected layer of the value head
:param channels_policy_head: Number of channels for the policy head
:param dropout_rate: Droput factor to use. If 0, no dropout will be applied. Value must be in [0,1]
:param dropout_rate: Dropout factor to use. If 0, no dropout will be applied. Value must be in [0,1]
:param grad_scale_value: Constant scalar which the gradient for the value outputs are being scaled with.
(0.01 is recommended for supervised learning with little data)
:param grad_scale_policy: Constant scalar which the gradient for the policy outputs are being scaled with.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ def preact_resnet_se(channels=256, act_type='relu',
:param channels_value_head: Number of channels for the value head
:param value_fc_size: Number of units in the fully connected layer of the value head
:param channels_policy_head: Number of channels for the policy head
:param dropout_rate: Droput factor to use. If 0, no dropout will be applied. Value must be in [0,1]
:param dropout_rate: Dropout factor to use. If 0, no dropout will be applied. Value must be in [0,1]
:param grad_scale_value: Constant scalar which the gradient for the value outputs are being scaled with.
(0.01 is recommended for supervised learning with little data)
:param grad_scale_policy: Constant scalar which the gradient for the policy outputs are being scaled with.
Expand All @@ -61,7 +61,7 @@ def preact_resnet_se(channels=256, act_type='relu',
of residual blocks.
:param n_labels: Number of policy target labels (used for select_policy_from_plane=False)
:param se_ratio: Reduction ratio used in the squeeze excitation module
:param se_types: List of squeeze exciation modules to use for each residual layer.
:param se_types: List of squeeze excitation modules to use for each residual layer.
The length of this list must be the same as len(kernels). Available types:
- "se": Squeeze excitation block - Hu et al. - https://arxiv.org/abs/1709.01507
- "cbam": Convolutional Block Attention Module (CBAM) - Woo et al. - https://arxiv.org/pdf/1807.06521.pdf
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -101,12 +101,12 @@ def __init__(self, nb_input_channels, board_height, board_width,
:param channels_value_head: Number of channels for the value head
:param value_fc_size: Number of units in the fully connected layer of the value head
:param channels_policy_head: Number of channels for the policy head
:param dropout_rate: Droput factor to use. If 0, no dropout will be applied. Value must be in [0,1]
:param dropout_rate: Dropout factor to use. If 0, no dropout will be applied. Value must be in [0,1]
:param select_policy_from_plane: True, if policy head type shall be used
:param kernels: List of kernel sizes used for the residual blocks. The length of the list corresponds to the number
of residual blocks.
:param n_labels: Number of policy target labels (used for select_policy_from_plane=False)
:param se_types: List of squeeze exciation modules to use for each residual layer.
:param se_types: List of squeeze excitation modules to use for each residual layer.
The length of this list must be the same as len(kernels). Available types:
- "se": Squeeze excitation block - Hu et al. - https://arxiv.org/abs/1709.01507
- "cbam": Convolutional Block Attention Module (CBAM) - Woo et al. - https://arxiv.org/pdf/1807.06521.pdf
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ def rise_mobile_v2_symbol(channels=256, channels_operating_init=128, channel_exp
(They used 1.0 for default and 0.01 in the supervised setting)
:param grad_scale_policy: Constant scalar which the gradient for the policy outputs are being scaled with.
(They used 1.0 for default and 0.99 in the supervised setting)
:param dropout_rate: Applies optionally droput during learning with a given factor on the last feature space before
:param dropout_rate: Applies optionally dropout during learning with a given factor on the last feature space before
:param use_extra_variant_input: If true, the last 9 channel which represent the active variant are passed to each
residual block separately and concatenated at the end of the final feature representation
branching into value and policy head
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ def rise_mobile_v3_symbol(channels=256, channels_operating_init=224, channel_exp
:param channels_value_head: Number of channels for the value head
:param value_fc_size: Number of units in the fully connected layer of the value head
:param channels_policy_head: Number of channels for the policy head
:param dropout_rate: Droput factor to use. If 0, no dropout will be applied. Value must be in [0,1]
:param dropout_rate: Dropout factor to use. If 0, no dropout will be applied. Value must be in [0,1]
:param grad_scale_value: Constant scalar which the gradient for the value outputs are being scaled with.
(0.01 is recommended for supervised learning with little data)
:param grad_scale_policy: Constant scalar which the gradient for the policy outputs are being scaled with.
Expand All @@ -117,7 +117,7 @@ def rise_mobile_v3_symbol(channels=256, channels_operating_init=224, channel_exp
of residual blocks.
:param n_labels: Number of policy target labels (used for select_policy_from_plane=False)
:param se_ratio: Reduction ratio used in the squeeze excitation module
:param se_types: List of squeeze exciation modules to use for each residual layer.
:param se_types: List of squeeze excitation modules to use for each residual layer.
The length of this list must be the same as len(kernels). Available types:
- "se": Squeeze excitation block - Hu et al. - https://arxiv.org/abs/1709.01507
- "cbam": Convolutional Block Attention Module (CBAM) - Woo et al. - https://arxiv.org/pdf/1807.06521.pdf
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ def convert_pytorch_tar_model_to_onnx(tar_file, input_shape, batch_sizes, model_
"""
train_config = TrainConfig()

# load the model and its paramters
# load the model and its parameters
net = get_rise_v33_model_by_train_config(input_shape, train_config)
if torch.cuda.is_available():
net.cuda(torch.device(f"cuda:{train_config.device_id}"))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ def board_to_planes(board: chess.Board, board_occ, normalize=True, last_moves=No
Total: 52 planes
:param board: Board handle (Python-chess object)
:param board_occ: Number of board occurences
:param board_occ: Number of board occurrences
:param normalize: True if the inputs shall be normalized to the range [0.-1.]
:param last_moves: List of last moves. The most recent move is the first entry.
:return: planes - the plane representation of the current board state
Expand Down
2 changes: 1 addition & 1 deletion DeepCrazyhouse/src/domain/variants/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@
elif VERSION == 2:
NB_CHANNELS_POS = 12 + 1 + 18 # 12 pieces + 1 en-passant and 18 auxiliary
else: # VERSION == 3
NB_CHANNELS_POS = 15 + 15 # 12 pieces + 2 repetition + 1 en-passent + 15 auxiliary
NB_CHANNELS_POS = 15 + 15 # 12 pieces + 2 repetition + 1 en-passant + 15 auxiliary
if VERSION == 1:
NB_CHANNELS_CONST = 7
elif VERSION == 2:
Expand Down
4 changes: 2 additions & 2 deletions DeepCrazyhouse/src/domain/variants/game_state.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ def get_pythonchess_board(self):
return self.board

def is_draw(self):
""" Check if you can claim a draw - its assumed that the draw is always claimed """
""" Check if you can claim a draw - it's assumed that the draw is always claimed """
return self.board.is_variant_draw() or self.can_claim_threefold_repetition() or self.board.can_claim_fifty_moves()
# return self.board.can_claim_draw()

Expand Down Expand Up @@ -102,7 +102,7 @@ def get_legal_moves(self):
return [*self.board.legal_moves] # is same as list(self.board.legal_moves)

def is_white_to_move(self):
""" Returns true if its whites turn"""
""" Returns true if it's white's turn"""
return self.board.turn

def mirror_policy(self) -> bool:
Expand Down
4 changes: 2 additions & 2 deletions DeepCrazyhouse/src/domain/variants/input_representation.py
Original file line number Diff line number Diff line change
Expand Up @@ -526,9 +526,9 @@ def get_planes_statistics(board: chess.Board, normalize: bool, last_moves_uci: l
e.g get_planes_statistics(board, False, last_moves=[chess.Move.from_uci("d7d5")])
:param board: Chess board object
:param normalize: Decides if the planes should be normalized
:param last_moves_uci: Last moves in UCI notation. Chronoligically ordered, meaning first move is first entry and
:param last_moves_uci: Last moves in UCI notation. Chronologically ordered, meaning first move is first entry and
most recent move is last entry.
:param board_occ: Gives information on how often this position has occured already.
:param board_occ: Gives information on how often this position has occurred already.
"""
last_moves = []
for uci_move in last_moves_uci[::-1]:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,7 @@ def set_illegal_moves_to_zero(board: chess.variant.CrazyhouseBoard, policy_vec,
def get_probs_of_move_list(policy_vec: np.ndarray, mv_list: [chess.Move], mirror_policy: bool, normalize: bool = True):
"""
Returns an array in which entry relates to the probability for the given move list.
Its assumed that the moves in the move list are legal and shouldn't be mirrored.
It's assumed that the moves in the move list are legal and shouldn't be mirrored.
:param policy_vec: Policy vector from the neural net prediction
:param mv_list: List of legal moves for a specific board position
:param mirror_policy: Decides if the current policy shall be mirrored
Expand Down
6 changes: 3 additions & 3 deletions DeepCrazyhouse/src/preprocessing/pgn_to_planes_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ def __init__(
-> blosc.list_compressors()
['blosclz', 'lz4', 'lz4hc', 'snappy', 'zlib', 'zstd']
:param clevel: compression level (more info at: https://zarr.readthedocs.io/en/stable/tutorial.html#compressors)
# see http://www.h5py.org/lzf/ for performance comparision
# see http://www.h5py.org/lzf/ for performance comparison
# Tutorial about compression:
https://dziganto.github.io/out-of-core%20computation/HDF5-Or-How-I-Learned-To-Love-Data-Compression-And-Partial-Input-Output/
:param log_lvl: Sets the log_lvl for the log messages
Expand All @@ -84,7 +84,7 @@ def __init__(
pgn files. This has the condition that the pgns have been properly preprocessed before.
(This option was added for the Stockfish self play dataset).
:param first_pgn_to_analyze: Optional parameter in which you can define the first pgn file to select.
If None it will automaticly choose the first file in the specified directory
If None it will automatically choose the first file in the specified directory
:param min_number_moves: Minimum number of moves which have to be played in a game to be selected
:return:
"""
Expand Down Expand Up @@ -486,7 +486,7 @@ def export_pgn_batch(self, cur_part, game_idx_start, game_idx_end, pgn_sel, nb_w

# Refactoring is probably a good idea
# Too many arguments (8/5) - Too many local variables (32/15) - Too many statements (69/50)
params_inp = [] # create a param input list which will concatenate the pgn with it's corresponding game index
params_inp = [] # create a param input list which will concatenate the pgn with its corresponding game index
for i, pgn in enumerate(pgn_sel):
game_idx = game_idx_start + i
params_inp.append((pgn, game_idx, self._mate_in_one))
Expand Down
2 changes: 1 addition & 1 deletion DeepCrazyhouse/src/runtime/color_logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ def new(*args):


def enable_color_logging(debug_lvl=logging.DEBUG):
"""Groups every other function on this file, its the main of the module"""
"""Groups every other function on this file, it's the main of the module"""
if platform.system() == "Windows":
# Windows does not support ANSI escapes and we are using API calls to set the console color
logging.StreamHandler.emit = add_coloring_to_emit_windows(logging.StreamHandler.emit)
Expand Down
Loading

0 comments on commit a7a14df

Please sign in to comment.