Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

remove pylint #4836

Merged
merged 3 commits
Jan 8, 2021
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 0 additions & 12 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -77,18 +77,6 @@ repos:
hooks:
- id: python-check-mock-methods

- repo: https://github.com/pre-commit/mirrors-pylint
rev: v2.4.4
hooks:
- id: pylint
exclude: >
(?x)^(
.*_pb2.py|
.*_pb2_grpc.py|
.*/tests/.*
)$
args: [--score=n]

- repo: https://github.com/mattlqx/pre-commit-search-and-replace
rev: v1.0.3
hooks:
Expand Down
53 changes: 0 additions & 53 deletions .pylintrc

This file was deleted.

2 changes: 1 addition & 1 deletion ml-agents-envs/mlagents_envs/environment.py
Original file line number Diff line number Diff line change
Expand Up @@ -468,7 +468,7 @@ def _returncode_to_signal_name(returncode: int) -> Optional[str]:
"""
try:
# A negative value -N indicates that the child was terminated by signal N (POSIX only).
s = signal.Signals(-returncode) # pylint: disable=no-member
s = signal.Signals(-returncode)
return s.name
except Exception:
# Should generally be a ValueError, but catch everything just in case.
Expand Down
2 changes: 1 addition & 1 deletion ml-agents-envs/mlagents_envs/registry/binary_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def get_local_binary_path(name: str, url: str) -> str:
break
try:
download_and_extract_zip(url, name)
except Exception: # pylint: disable=W0702
except Exception:
if attempt + 1 < NUMBER_ATTEMPTS:
logger.warning(
f"Attempt {attempt + 1} / {NUMBER_ATTEMPTS}"
Expand Down
15 changes: 3 additions & 12 deletions ml-agents-envs/mlagents_envs/rpc_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -220,9 +220,7 @@ def observation_to_np_array(
def _process_visual_observation(
obs_index: int,
shape: Tuple[int, int, int],
agent_info_list: Collection[
AgentInfoProto
], # pylint: disable=unsubscriptable-object
agent_info_list: Collection[AgentInfoProto],
) -> np.ndarray:
if len(agent_info_list) == 0:
return np.zeros((0, shape[0], shape[1], shape[2]), dtype=np.float32)
Expand Down Expand Up @@ -256,11 +254,7 @@ def _raise_on_nan_and_inf(data: np.array, source: str) -> np.array:

@timed
def _process_vector_observation(
obs_index: int,
shape: Tuple[int, ...],
agent_info_list: Collection[
AgentInfoProto
], # pylint: disable=unsubscriptable-object
obs_index: int, shape: Tuple[int, ...], agent_info_list: Collection[AgentInfoProto]
) -> np.ndarray:
if len(agent_info_list) == 0:
return np.zeros((0,) + shape, dtype=np.float32)
Expand All @@ -277,10 +271,7 @@ def _process_vector_observation(

@timed
def steps_from_proto(
agent_info_list: Collection[
AgentInfoProto
], # pylint: disable=unsubscriptable-object
behavior_spec: BehaviorSpec,
agent_info_list: Collection[AgentInfoProto], behavior_spec: BehaviorSpec
) -> Tuple[DecisionSteps, TerminalSteps]:
decision_agent_info_list = [
agent_info for agent_info in agent_info_list if not agent_info.done
Expand Down
2 changes: 0 additions & 2 deletions ml-agents/mlagents/torch_utils/torch.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,6 @@ def assert_torch_installed():
torch.set_num_threads(cpu_utils.get_num_threads_to_use())
os.environ["KMP_BLOCKTIME"] = "0"

# Known PyLint compatibility with PyTorch https://github.com/pytorch/pytorch/issues/701
# pylint: disable=E1101
if torch.cuda.is_available():
torch.set_default_tensor_type(torch.cuda.FloatTensor)
device = torch.device("cuda")
Expand Down
1 change: 0 additions & 1 deletion ml-agents/mlagents/trainers/barracuda.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
# pylint: skip-file
# flake8: noqa
from __future__ import print_function
from collections import defaultdict
Expand Down
1 change: 0 additions & 1 deletion ml-agents/mlagents/trainers/learn.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@


def get_version_string() -> str:
# pylint: disable=no-member
return f""" Version information:
ml-agents: {mlagents.trainers.__version__},
ml-agents-envs: {mlagents_envs.__version__},
Expand Down
2 changes: 1 addition & 1 deletion ml-agents/mlagents/trainers/optimizer/torch_optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
from mlagents.trainers.torch.utils import ModelUtils


class TorchOptimizer(Optimizer): # pylint: disable=W0223
class TorchOptimizer(Optimizer):
def __init__(self, policy: TorchPolicy, trainer_settings: TrainerSettings):
super().__init__()
self.policy = policy
Expand Down
4 changes: 1 addition & 3 deletions ml-agents/mlagents/trainers/policy/torch_policy.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,9 +204,7 @@ def get_action(
for agent_id in decision_requests.agent_id
] # For 1-D array, the iterator order is correct.

run_out = self.evaluate(
decision_requests, global_agent_ids
) # pylint: disable=assignment-from-no-return
run_out = self.evaluate(decision_requests, global_agent_ids)
self.save_memories(global_agent_ids, run_out.get("memory_out"))
self.check_nan_action(run_out.get("action"))
return ActionInfo(
Expand Down
4 changes: 2 additions & 2 deletions ml-agents/mlagents/trainers/sac/optimizer_torch.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ def forward(
# ExitStack allows us to enter the torch.no_grad() context conditionally
with ExitStack() as stack:
if not q1_grad:
stack.enter_context(torch.no_grad()) # pylint: disable=E1101
stack.enter_context(torch.no_grad())
q1_out, _ = self.q1_network(
inputs,
actions=actions,
Expand All @@ -85,7 +85,7 @@ def forward(
)
with ExitStack() as stack:
if not q2_grad:
stack.enter_context(torch.no_grad()) # pylint: disable=E1101
stack.enter_context(torch.no_grad())
q2_out, _ = self.q2_network(
inputs,
actions=actions,
Expand Down
2 changes: 1 addition & 1 deletion ml-agents/mlagents/trainers/tests/torch/test_ppo.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from mlagents.trainers.tests import mock_brain as mb
from mlagents.trainers.tests.test_trajectory import make_fake_trajectory
from mlagents.trainers.settings import NetworkSettings
from mlagents.trainers.tests.dummy_config import ( # noqa: F401; pylint: disable=unused-variable
from mlagents.trainers.tests.dummy_config import ( # noqa: F401
ppo_dummy_config,
curiosity_dummy_config,
gail_dummy_config,
Expand Down
2 changes: 1 addition & 1 deletion ml-agents/mlagents/trainers/tests/torch/test_sac.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from mlagents.trainers.policy.torch_policy import TorchPolicy
from mlagents.trainers.tests import mock_brain as mb
from mlagents.trainers.settings import NetworkSettings
from mlagents.trainers.tests.dummy_config import ( # noqa: F401; pylint: disable=unused-variable
from mlagents.trainers.tests.dummy_config import ( # noqa: F401
sac_dummy_config,
curiosity_dummy_config,
)
Expand Down
2 changes: 1 addition & 1 deletion ml-agents/mlagents/trainers/trainer/rl_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@
logger = get_logger(__name__)


class RLTrainer(Trainer): # pylint: disable=abstract-method
class RLTrainer(Trainer):
"""
This class is the base class for trainers that use Reward Signals.
"""
Expand Down