Turn on F401: Unused import warning. (#18598)
Summary:
Pull Request resolved: #18598
ghimport-source-id: c74597e

Stack from [ghstack](https://github.com/ezyang/ghstack):
* **#18598 Turn on F401: Unused import warning.**

This was requested by someone at Facebook; this lint is turned
on for Facebook by default.  "Sure, why not."
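
For reference, F401 is the pyflakes "imported but unused" check: it fires on
any imported name that is never referenced in the module. A minimal sketch of
what it reports (hypothetical file and module contents):

    # example.py
    import os    # flagged: F401 'os' imported but unused
    import sys   # not flagged: referenced below

    print(sys.argv)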

I had to noqa a number of imports in __init__.  Hypothetically
we're supposed to use __all__ in this case, but I was too lazy
to fix it.  Left for future work.
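
The __all__ alternative looks like this -- a sketch, assuming a hypothetical
package that re-exports a name from a submodule (the two snippets are
alternative __init__.py bodies, not one module):

    # Option used in this commit: keep the re-export, silence the lint.
    from .cells import lstm_cell  # noqa: F401

    # Deferred __all__ option: pyflakes counts names listed in __all__
    # as used, so no noqa is needed.
    from .cells import lstm_cell
    __all__ = ['lstm_cell']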

Be careful!  flake8-2 and flake8-3 behave differently with
respect to import resolution for # type: comments.  flake8-3 will
report the import as unused; flake8-2 will not.  For now, I just
noqa'd all these sites.
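
Concretely, the pattern in question is an import referenced only inside a
# type: comment -- a sketch:

    from typing import List  # noqa: F401  (used only in the type comment)

    def total(xs):
        # type: (List[int]) -> int
        return sum(xs)

Under flake8-3, List is reported as unused because the only reference lives
in a comment; the noqa keeps both flake8 versions quiet.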

All the changes were done by hand.

Signed-off-by: Edward Z. Yang <ezyang@fb.com>

Differential Revision: D14687478

fbshipit-source-id: 30d532381e914091aadfa0d2a5a89404819663e3
ezyang authored and facebook-github-bot committed Mar 30, 2019
1 parent 96456bf commit 173f224
Showing 131 changed files with 175 additions and 333 deletions.
2 changes: 1 addition & 1 deletion .circleci/cimodel/lib/visualization.py
@@ -22,7 +22,7 @@ def handle_missing_graphviz(f):
     calls to the draw() method of the returned object to do nothing.
     """
     try:
-        import pygraphviz
+        import pygraphviz  # noqa: F401
         return f
 
     except ModuleNotFoundError:

2 changes: 1 addition & 1 deletion .flake8
@@ -7,7 +7,7 @@ max-line-length = 120
 # C408 ignored because we like the dict keyword argument syntax
 # E501 is not flexible enough, we're using B950 instead
 ignore =
-    E203,E305,E402,E501,E721,E741,F401,F403,F405,F821,F841,F999,W503,W504,C408,
+    E203,E305,E402,E501,E721,E741,F403,F405,F821,F841,F999,W503,W504,C408,
     # these ignores are from flake8-bugbear; please fix!
     B007,B008,
     # these ignores are from flake8-comprehensions; please fix!

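With F401 removed from the ignore list, flake8 reports unused imports by
default everywhere; the per-line # noqa: F401 comments in the files below are
the targeted escape hatch. To reproduce just this class of warnings locally,
something along the lines of

    flake8 --select=F401

run from the repository root should surface the same sites.
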
1 change: 0 additions & 1 deletion .jenkins/pytorch/perf_test/compare_with_baseline.py
@@ -1,7 +1,6 @@
 import sys
 import json
 import math
-import numpy
 import argparse
 
 parser = argparse.ArgumentParser()

4 changes: 2 additions & 2 deletions benchmarks/fastrnns/__init__.py
@@ -1,5 +1,5 @@
-from .cells import *
-from .factory import *
+from .cells import *  # noqa: F401
+from .factory import *  # noqa: F401
 
 # (output, next_state) = cell(input, state)
 seqLength = 100

1 change: 0 additions & 1 deletion benchmarks/fastrnns/profile.py
@@ -1,5 +1,4 @@
 import argparse
-import os
 import subprocess
 import sys
 import time

1 change: 0 additions & 1 deletion benchmarks/fastrnns/test.py
@@ -2,7 +2,6 @@
 import torch
 import torch.nn as nn
 
-from .cells import lstm_cell
 from .factory import pytorch_lstm_creator, varlen_pytorch_lstm_creator
 from .runner import get_nn_runners
 

3 changes: 0 additions & 3 deletions docs/cpp/source/conf.py
@@ -20,11 +20,8 @@
 import os
 # sys.path.insert(0, os.path.abspath('.'))
 
-import sys
-import textwrap
 
-import pytorch_sphinx_theme
 
 # -- General configuration ------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.

2 changes: 1 addition & 1 deletion docs/source/conf.py
@@ -25,7 +25,7 @@
 
 import torch
 try:
-    import torchvision
+    import torchvision  # noqa: F401
 except ImportError:
     import warnings
     warnings.warn('unable to load "torchvision" package')

3 changes: 1 addition & 2 deletions setup.py
@@ -142,7 +142,7 @@
 # we will search for libraries in these paths
 
 from __future__ import print_function
-from setuptools import setup, Extension, distutils, Command, find_packages
+from setuptools import setup, Extension, distutils, find_packages
 from distutils import core, dir_util
 from distutils.core import Distribution
 from distutils.errors import DistutilsArgError
@@ -151,7 +151,6 @@
 import distutils.command.clean
 import distutils.sysconfig
 import filecmp
-import platform
 import subprocess
 import shutil
 import sys

6 changes: 3 additions & 3 deletions test/common_methods_invocations.py
@@ -1,9 +1,9 @@
 import torch
-from torch._six import inf, nan, istuple
-from functools import reduce, wraps
+from torch._six import inf, istuple
+from functools import reduce
 from operator import mul, itemgetter
 import collections
-from torch.autograd import Variable, Function, detect_anomaly
+from torch.autograd import Variable
 from torch.testing import make_non_contiguous
 from common_utils import (skipIfNoLapack,
                           prod_single_zero, random_square_matrix_of_rank,

2 changes: 1 addition & 1 deletion test/common_nn.py
@@ -13,7 +13,7 @@
 import torch.nn.functional as F
 from torch.nn.functional import _Reduction
 from common_utils import TestCase, to_gpu, freeze_rng_state, is_iterable, \
-    TEST_WITH_ROCM, skipIfRocm
+    TEST_WITH_ROCM
 from common_cuda import TEST_CUDA
 from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors
 from torch.autograd import Variable

1 change: 0 additions & 1 deletion test/custom_operator/test_custom_ops.py
@@ -1,4 +1,3 @@
-import argparse
 import os.path
 import tempfile
 import unittest

2 changes: 0 additions & 2 deletions test/onnx/debug_embed_params.py
@@ -4,12 +4,10 @@
 from __future__ import unicode_literals
 
 import sys
-import itertools
 
 import torch
 import torch.jit
 from torch.autograd import Variable
-import torch.autograd.function as function
 
 import onnx
 import caffe2.python.onnx.backend as c2

3 changes: 0 additions & 3 deletions test/onnx/export_onnx_tests_filter.py
@@ -5,12 +5,9 @@
 
 import argparse
 import glob
-import numpy as np
 import onnx.backend.test
-import caffe2.python.onnx.backend as c2
 import os
 import shutil
-from onnx import numpy_helper
 from test_caffe2_common import run_generated_test
 import google.protobuf.text_format
 import test_onnx_common

1 change: 0 additions & 1 deletion test/onnx/export_onnx_tests_generator.py
@@ -13,7 +13,6 @@
 import torch
 import traceback
 
-import test_pytorch_common
 import test_onnx_common
 from common_nn import module_tests
 from test_nn import new_module_tests

8 changes: 4 additions & 4 deletions test/onnx/model_defs/__init__.py
@@ -1,4 +1,4 @@
-from .squeezenet import *
-from .super_resolution import *
-from .op_test import *
-from .srresnet import *
+from .squeezenet import *  # noqa: F401
+from .super_resolution import *  # noqa: F401
+from .op_test import *  # noqa: F401
+from .srresnet import *  # noqa: F401

1 change: 0 additions & 1 deletion test/onnx/model_defs/squeezenet.py
@@ -1,4 +1,3 @@
-import math
 import torch
 import torch.nn as nn
 import torch.nn.init as init

1 change: 0 additions & 1 deletion test/onnx/model_defs/super_resolution.py
@@ -1,4 +1,3 @@
-import torch
 import torch.nn as nn
 import torch.nn.init as init
 

1 change: 0 additions & 1 deletion test/onnx/model_defs/word_language_model.py
@@ -3,7 +3,6 @@
 
 import torch
 import torch.nn as nn
-from torch.autograd import Variable
 
 
 class RNNModel(nn.Module):

11 changes: 1 addition & 10 deletions test/onnx/test_models.py
@@ -5,7 +5,6 @@
 from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
 
 from model_defs.mnist import MNIST
-from model_defs.word_language_model import RNNModel
 from model_defs.squeezenet import SqueezeNet
 from model_defs.super_resolution import SuperResolutionNet
 from model_defs.srresnet import SRResNet
@@ -17,17 +16,9 @@
 import torch
 import torch.onnx
 import torch.onnx.utils
-from torch.autograd import Variable, Function
-from torch.nn import Module
+from torch.autograd import Variable
 from torch.onnx import OperatorExportTypes
-
-import onnx
-import onnx.checker
-import onnx.helper
-
-import google.protobuf.text_format
-
 import io
 import unittest
 
 import caffe2.python.onnx.backend as backend

4 changes: 1 addition & 3 deletions test/onnx/test_operators.py
@@ -1,4 +1,4 @@
-from test_pytorch_common import TestCase, run_tests, skipIfNoLapack, flatten
+from test_pytorch_common import TestCase, run_tests, flatten
 
 import torch
 import torch.onnx
@@ -10,11 +10,9 @@
 import io
 import unittest
 import inspect
-import argparse
 import glob
 import os
 import shutil
-import sys
 import common_utils as common
 
 

2 changes: 1 addition & 1 deletion test/onnx/test_pytorch_common.py
@@ -13,7 +13,7 @@
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.insert(-1, pytorch_test_dir)
 
-from common_utils import *
+from common_utils import *  # noqa: F401
 
 torch.set_default_tensor_type('torch.FloatTensor')
 

1 change: 0 additions & 1 deletion test/onnx/test_pytorch_helper.py
@@ -1,7 +1,6 @@
 # Some standard imports
 import numpy as np
 from torch import nn
-from torch.autograd import Variable
 import torch.onnx
 import torch.nn.init as init
 from caffe2.python.model_helper import ModelHelper

1 change: 0 additions & 1 deletion test/onnx/test_pytorch_onnx_caffe2.py
@@ -3,7 +3,6 @@
 from __future__ import print_function
 from __future__ import unicode_literals
 
-from functools import wraps
 import numpy as np
 import sys
 import unittest

1 change: 0 additions & 1 deletion test/optim/test.py
@@ -1,7 +1,6 @@
 import json
 import torch
 import torch.legacy.optim as optim
-from pprint import pprint
 
 
 def rosenbrock(tensor):

13 changes: 5 additions & 8 deletions test/test_autograd.py
@@ -8,31 +8,28 @@
 from copy import deepcopy
 from collections import OrderedDict
 from itertools import product
-from operator import mul, itemgetter
-from functools import reduce, wraps
+from operator import mul
+from functools import reduce
 from torch._six import inf, nan, istuple
 from torch.autograd.gradcheck import gradgradcheck, gradcheck
 from torch.autograd.function import once_differentiable
 from torch.autograd.profiler import profile
 from torch.utils.checkpoint import checkpoint
 from common_utils import (TEST_MKL, TestCase, run_tests, skipIfNoLapack,
                           suppress_warnings, skipIfRocm,
-                          prod_single_zero, random_square_matrix_of_rank,
-                          random_symmetric_matrix, random_symmetric_psd_matrix,
-                          random_symmetric_pd_matrix, make_nonzero_det,
-                          random_fullrank_matrix_distinct_singular_value, load_tests)
+                          load_tests)
 from common_cuda import TEST_CUDA
 from torch.autograd import Variable, Function, detect_anomaly
 from torch.autograd.function import InplaceFunction
-from torch.testing import make_non_contiguous, randn_like
+from torch.testing import randn_like
 from common_methods_invocations import (method_tests,
                                         create_input, unpack_variables,
                                         EXCLUDE_FUNCTIONAL, EXCLUDE_GRADCHECK,
                                         EXCLUDE_GRADGRADCHECK,
                                         EXCLUDE_GRADGRADCHECK_BY_TEST_NAME,
                                         exclude_tensor_method,
                                         mask_not_all_zeros,
-                                        L, S)
+                                        S)
 
 # load_tests from common_utils is used to automatically filter tests for
 # sharding on sandcastle. This line silences flake warnings

6 changes: 2 additions & 4 deletions test/test_cuda.py
@@ -1,7 +1,5 @@
 import io
-import math
 import tempfile
-import re
 import unittest
 import sys
 from itertools import repeat
@@ -19,9 +17,9 @@
 from test_torch import _TestTorchMixin
 
 from common_methods_invocations import tri_tests_args, tri_large_tests_args, \
-    run_additional_tri_tests, _compare_trilu_indices, _compare_large_trilu_indices
+    _compare_trilu_indices, _compare_large_trilu_indices
 from common_utils import TestCase, get_gpu_type, to_gpu, freeze_rng_state, run_tests, \
-    PY3, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, skipIfRocm, TEST_NUMPY, TEST_WITH_ROCM, load_tests, iter_indices
+    PY3, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, skipIfRocm, TEST_NUMPY, TEST_WITH_ROCM, load_tests
 
 # load_tests from common_utils is used to automatically filter tests for
 # sharding on sandcastle. This line silences flake warnings

3 changes: 0 additions & 3 deletions test/test_dataloader.py
@@ -3,13 +3,10 @@
 import errno
 import os
 import ctypes
-import signal
 import torch
 import gc
 import time
-import traceback
 import unittest
-import subprocess
 import itertools
 import warnings
 from torch import multiprocessing as mp

2 changes: 0 additions & 2 deletions test/test_distributed.py
@@ -16,11 +16,9 @@
 import torch.distributed as dist
 import torch.nn as nn
 import torch.nn.functional as F
-import torch.optim as optim
 from common_utils import TestCase, run_tests
 from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR
 from torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT
-import common_utils as common
 
 BACKEND = os.environ["BACKEND"]
 TEMP_DIR = os.environ["TEMP_DIR"]

2 changes: 0 additions & 2 deletions test/test_docs_coverage.py
@@ -2,8 +2,6 @@
 import unittest
 import os
 import re
-import ast
-import _ast
 import textwrap
 
 

1 change: 0 additions & 1 deletion test/test_indexing.py
@@ -1,6 +1,5 @@
 from common_utils import TestCase, run_tests
 import torch
-import warnings
 from torch import tensor
 import unittest
 

[Diffs for the remaining changed files are not shown.]