Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Move Pyright to pre-commit + add pydocstyle #737

Merged
merged 17 commits into from
Sep 9, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
7 changes: 0 additions & 7 deletions .github/workflows/linux-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,3 @@ jobs:
- name: Release Test
run: |
xvfb-run -s "-screen 0 1024x768x24" pytest -v --cov=pettingzoo --cov-report term
- name: Check code with pyright
uses: jakebailey/pyright-action@v1
with:
version: 1.1.244
python-platform: ${{ matrix.python-platform }}
python-version: ${{ matrix.python-version }}
continue-on-error: true
28 changes: 25 additions & 3 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/python/black
rev: 22.3.0
rev: 22.6.0
hooks:
- id: black
- repo: https://github.com/codespell-project/codespell
Expand All @@ -28,13 +28,35 @@ repos:
- id: isort
args: ["--profile", "black"]
- repo: https://github.com/asottile/pyupgrade
rev: v2.32.1
rev: v2.34.0
hooks:
- id: pyupgrade
# TODO: remove `--keep-runtime-typing` option
args: ["--py37-plus", "--keep-runtime-typing"]
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.2.0 # Use the ref you want to point at
rev: v4.3.0
hooks:
- id: mixed-line-ending
args: ["--fix=lf"]
- repo: https://github.com/pycqa/pydocstyle
rev: 6.1.1
hooks:
- id: pydocstyle
args:
- --source
- --explain
- --convention=google
- --count
# TODO: Remove ignoring rules D101, D102, D103, D105
- --add-ignore=D100,D107,D101,D102,D103,D105
exclude: "__init__.py$|^pettingzoo.test|^docs"
additional_dependencies: ["toml"]
- repo: local
hooks:
- id: pyright
name: pyright
entry: pyright
language: node
pass_filenames: false
types: [python]
additional_dependencies: ["pyright"]
20 changes: 14 additions & 6 deletions pettingzoo/atari/base_atari_env.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,11 @@ def __init__(
max_cycles=100000,
auto_rom_install_path=None,
):
"""Frameskip should be either a tuple (indicating a random range to
choose from, with the top value excluded), or an int."""
"""Initializes the `ParallelAtariEnv` class.

Frameskip should be either a tuple (indicating a random range to
choose from, with the top value excluded), or an int.
"""
EzPickle.__init__(
self,
game,
Expand Down Expand Up @@ -264,9 +267,12 @@ def close(self):
self._screen = None

def clone_state(self):
"""Clone emulator state w/o system state. Restoring this state will
*not* give an identical environment. For complete cloning and restoring
of the full state, see `{clone,restore}_full_state()`."""
"""Clone emulator state w/o system state.

Restoring this state will *not* give an identical environment.
For complete cloning and restoring of the full state,
see `{clone,restore}_full_state()`.
"""
state_ref = self.ale.cloneState()
state = self.ale.encodeState(state_ref)
self.ale.deleteState(state_ref)
Expand All @@ -280,7 +286,9 @@ def restore_state(self, state):

def clone_full_state(self):
"""Clone emulator state w/ system state including pseudorandomness.
Restoring this state will give an identical environment."""

Restoring this state will give an identical environment.
"""
state_ref = self.ale.cloneSystemState()
state = self.ale.encodeState(state_ref)
self.ale.deleteState(state_ref)
Expand Down
22 changes: 10 additions & 12 deletions pettingzoo/butterfly/cooperative_pong/cake_paddle.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,20 +71,18 @@ def _process_collision_with_rect(self, rect, b_rect, b_speed, paddle_type):
return True, b_rect, b_speed

def process_collision(self, b_rect, b_speed, paddle_type):
"""
"""Returns whether the ball collides with the paddle.

Parameters
----------
b_rect : Ball rect
dx, dy : Ball speed along single axis
b_speed : Ball speed
ignore paddle type
Args:
b_rect : Ball rect
dx, dy : Ball speed along single axis
b_speed : Ball speed
ignore paddle type

Returns
-------
is_collision: 1 if ball collides with paddle
b_rect: new ball rect
b_speed: new ball speed
Returns:
is_collision: 1 if ball collides with paddle
b_rect: new ball rect
b_speed: new ball speed

"""
if self.rect4.colliderect(b_rect):
Expand Down
4 changes: 1 addition & 3 deletions pettingzoo/butterfly/cooperative_pong/cooperative_pong.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,9 +202,7 @@ def observe(self):
return observation

def state(self):
"""
Returns an observation of the global environment
"""
"""Returns an observation of the global environment."""
state = pygame.surfarray.pixels3d(self.screen).copy()
state = np.rot90(state, k=3)
state = np.fliplr(state)
Expand Down
20 changes: 9 additions & 11 deletions pettingzoo/butterfly/cooperative_pong/paddle.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,19 +28,17 @@ def update(self, area, action):
self.rect = newpos

def process_collision(self, b_rect, b_speed, paddle_type):
"""
"""Process a collision.

Parameters
----------
b_rect : Ball rect
dx, dy : Ball speed along single axis
b_speed : Ball speed
Args:
b_rect : Ball rect
dx, dy : Ball speed along single axis
b_speed : Ball speed

Returns
-------
is_collision: 1 if ball collides with paddle
b_rect: new ball rect
b_speed: new ball speed
Returns:
is_collision: 1 if ball collides with paddle
b_rect: new ball rect
b_speed: new ball speed

"""
if not self.rect.colliderect(b_rect):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -389,9 +389,7 @@ def observe(self, agent):
return state

def state(self):
"""
Returns an observation of the global environment
"""
"""Returns an observation of the global environment."""
if not self.vector_state:
state = pygame.surfarray.pixels3d(self.WINDOW).copy()
state = np.rot90(state, k=3)
Expand Down
4 changes: 1 addition & 3 deletions pettingzoo/butterfly/pistonball/pistonball.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,9 +214,7 @@ def observe(self, agent):
return observation

def state(self):
"""
Returns an observation of the global environment
"""
"""Returns an observation of the global environment."""
state = pygame.surfarray.pixels3d(self.screen).copy()
state = np.rot90(state, k=3)
state = np.fliplr(state)
Expand Down
8 changes: 5 additions & 3 deletions pettingzoo/classic/chess/chess_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,8 @@ def make_move_mapping(uci_move):


def legal_moves(orig_board):
"""
"""Returns legal moves.

action space is a 8x8x73 dimensional array
Each of the 8×8
positions identifies the square from which to “pick up” a piece. The first 56 planes encode
Expand Down Expand Up @@ -194,8 +195,9 @@ def legal_moves(orig_board):


def get_observation(orig_board, player):
"""
Observation is an 8x8x(P + L) dimensional array
"""Returns observation array.

Observation is an 8x8x(P + L) dimensional array.
P is going to be your pieces positions + your opponents pieces positions
L is going to be some metadata such as repetition count.
"""
Expand Down
23 changes: 13 additions & 10 deletions pettingzoo/classic/go/go_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,8 @@

# Code from: https://github.com/tensorflow/minigo

"""
"""Minimalist Go engine.

A board is a NxN numpy array.
A Coordinate is a tuple index into the board.
A Move is a (Coordinate c | None).
Expand Down Expand Up @@ -85,8 +86,8 @@ def place_stones(board, color, stones):


def replay_position(position, result):
"""
Wrapper for a go.Position which replays its history.
"""Wrapper for a go.Position which replays its history.

Assumes an empty start position! (i.e. no handicap, and history must be exhaustive.)

Result must be passed in, since a resign cannot be inferred from position
Expand Down Expand Up @@ -120,7 +121,7 @@ def find_reached(board, c):


def is_koish(board, c):
"Check if c is surrounded on all sides by 1 color, and return that color"
"""Check if c is surrounded on all sides by 1 color, and return that color."""
if board[c] != EMPTY:
return None
neighbors = {board[n] for n in NEIGHBORS[c]}
Expand All @@ -131,7 +132,7 @@ def is_koish(board, c):


def is_eyeish(board, c):
"Check if c is an eye, for the purpose of restricting MC rollouts."
"""Check if c is an eye, for the purpose of restricting MC rollouts."""
# pass is fine.
if c is None:
return
Expand All @@ -152,7 +153,8 @@ def is_eyeish(board, c):


class Group(namedtuple("Group", ["id", "stones", "liberties", "color"])):
"""
"""Defines a Group.

stones: a frozenset of Coordinates belonging to this group
liberties: a frozenset of Coordinates that are empty and adjacent to this group.
color: color of this group
Expand Down Expand Up @@ -325,7 +327,8 @@ def __init__(
board_deltas=None,
to_play=BLACK,
):
"""
"""Initializes the `Position` class.

board: a numpy array
n: an int representing moves played so far
komi: a float, representing points given to the second player.
Expand Down Expand Up @@ -435,7 +438,7 @@ def is_move_suicidal(self, move):
return not potential_libs

def is_move_legal(self, move):
"Checks that a move is on an empty space, not on ko, and not suicide"
"""Checks that a move is on an empty space, not on ko, and not suicide."""
if move is None:
return True
if self.board[move] != EMPTY:
Expand All @@ -448,7 +451,7 @@ def is_move_legal(self, move):
return True

def all_legal_moves(self):
"Returns a np.array of size go.N**2 + 1, with 1 = legal, 0 = illegal"
"""Returns a np.array of size go.N**2 + 1, with 1 = legal, 0 = illegal."""
# by default, every move is legal
legal_moves = np.ones([N, N], dtype=np.int8)
# ...unless there is already a stone there
Expand Down Expand Up @@ -566,7 +569,7 @@ def is_game_over(self):
)

def score(self):
"Return score from B perspective. If W is winning, score is negative."
"""Return score from B perspective. If W is winning, score is negative."""
working_board = np.copy(self.board)
while EMPTY in working_board:
unassigned_spaces = np.where(working_board == EMPTY)
Expand Down
14 changes: 6 additions & 8 deletions pettingzoo/classic/hanabi/hanabi.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,8 @@ def __init__(
observation_type: int = 1,
random_start_player: bool = False,
):
"""
"""Initializes the `raw_env` class.

Parameter descriptions :
- colors: int, Number of colors in [2,5].
- ranks: int, Number of ranks in [2,5].
Expand Down Expand Up @@ -266,13 +267,12 @@ def all_moves(self) -> List[int]:

# ToDo: Fix Return value
def reset(self, seed=None, return_info=False, options=None):
"""Resets the environment for a new game and returns observations of current player as List of ints
"""Resets the environment for a new game and returns observations of current player as List of ints.

Returns:
observation: Optional list of integers of length self.observation_vector_dim, describing observations of
current agent (agent_selection).
"""

if seed is not None:
self.seed(seed=seed)

Expand All @@ -290,7 +290,6 @@ def reset(self, seed=None, return_info=False, options=None):

def _reset_agents(self, player_number: int):
"""Rearrange self.agents as pyhanabi starts a different player after each reset()."""

# Shifts self.agents list as long order starting player is not according to player_number
while not self.agents[0] == "player_" + str(player_number):
self.agents = self.agents[1:] + [self.agents[0]]
Expand Down Expand Up @@ -358,8 +357,7 @@ def observe(self, agent_name: str):
def _process_latest_observations(
self, obs: Dict, reward: Optional[float] = 0, done: Optional[bool] = False
):
"""Updates internal state"""

"""Updates internal state."""
self.latest_observations = obs
self.rewards = {a: reward for a in self.agents}
self.dones = {player_name: done for player_name in self.agents}
Expand All @@ -380,9 +378,9 @@ def _process_latest_observations(
}

def render(self, mode="human"):
"""Supports console print only. Prints player's data.
"""Prints player's data.

Example:
Supports console print only.
"""
player_data = self.latest_observations["player_observations"]
print(
Expand Down
4 changes: 3 additions & 1 deletion pettingzoo/classic/rps/rps.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,10 @@ def env(**kwargs):

class raw_env(AECEnv):
"""Two-player environment for rock paper scissors.

Expandable environment to rock paper scissors lizard spock action_6 action_7 ...
The observation is simply the last opponent action."""
The observation is simply the last opponent action.
"""

metadata = {
"render_modes": ["human", "rgb_array"],
Expand Down
4 changes: 1 addition & 3 deletions pettingzoo/magent/magent_env.py
Original file line number Diff line number Diff line change
Expand Up @@ -211,9 +211,7 @@ def _all_dones(self, step_done=False):
}

def state(self):
"""
Returns an observation of the global environment
"""
"""Returns an observation of the global environment."""
state = np.copy(self.base_state)

for handle in self._all_handles:
Expand Down
3 changes: 2 additions & 1 deletion pettingzoo/mpe/simple_crypto/simple_crypto.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""
"""Simple crypto environment.

Scenario:
1 speaker, 2 listeners (one of which is an adversary). Good agents rewarded for proximity to goal, and distance from
adversary to goal. Adversary is rewarded for its distance to the goal.
Expand Down