Skip to content
Permalink
Browse files

Add Python code formatting with autopep8 (#3298)

Summary:
**Summary**
This commit adds automatic formatting of Python code in `utils` and
`torch_glow` (just the tests) using `autopep8`, a Python autoformatting tool.
`README.md` has also been updated to include installation instructions
for `autopep8` via `brew`.

**Test Plan**
This commit also includes the files modified by running the updated
version of `utils/format.sh`.

**Documentation**
`README.md` has also been updated to include installation instructions for `autopep8` via brew.
Pull Request resolved: #3298

Differential Revision: D16514542

Pulled By: SplitInfinity

fbshipit-source-id: d466d9d78dad20aed915b3f17a2bcca4787f4bc8
  • Loading branch information...
SplitInfinity authored and facebook-github-bot committed Jul 26, 2019
1 parent bdf0a04 commit 59f26ae896a45b927fd6810544a85898816bf2bd
@@ -67,8 +67,8 @@ else
sudo apt-get install -y libpng-dev libgoogle-glog-dev
fi

# Install ninja, (newest version of) cmake and autopep8 through pip
sudo pip install ninja cmake autopep8
hash cmake ninja

# Build glow
@@ -79,7 +79,7 @@ Install the required dependencies using either [Homebrew](https://brew.sh/) or
[MacPorts](https://www.macports.org/). If using Homebrew, run:

```bash
brew install cmake graphviz libpng ninja protobuf wget glog autopep8
brew install llvm@7
```

@@ -4,13 +4,15 @@
x = torch.randn(4)
y = torch.randn(4)


# Simple element-wise arithmetic chain, scripted so Glow can fuse it.
@torch.jit.script
def foo(a, b):
    c = a.mul(b)
    a = c.mul(c)
    a = c.mul(a)
    d = c.div(a)
    return d


# Show the JIT IR for foo before the Glow fusion pass runs.
print("original jit ir")
print(foo.graph_for(x, y))

# Enable the Glow fusion pass so functions scripted after this point
# have their supported subgraphs lowered to Glow.
torch_glow.enableFusionPass()


@torch.jit.script
def foo_glow(a, b):
    return foo(a, b)


print("glow jit ir")
print(foo_glow.graph_for(x, y))

@@ -7,19 +7,26 @@

import argparse


def load_image(image_path):
    """Load an image file and return it as a 1x3x224x224 tensor.

    The image is converted to RGB, normalized by transform_image, and
    reshaped with a leading batch dimension of 1.
    """
    image = Image.open(image_path).convert('RGB')
    transformed_image = transform_image(image)
    return torch.reshape(transformed_image, (1, 3, 224, 224))

# given a PIL image, transform it to a normalized tensor for classification.


def transform_image(image):
    image = torchvisionTransforms.resize(image, 256)
    image = torchvisionTransforms.center_crop(image, 224)
    image = torchvisionTransforms.to_tensor(image)
    # Standard ImageNet channel means/stds.
    image = torchvisionTransforms.normalize(
        image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


def run_model(model, image, use_glow, print_graph):
if use_glow:
torch_glow.enableFusionPass()
@@ -30,23 +37,39 @@ def run_model(model, image, use_glow, print_graph):
all_outputs = traced(image)
topk = all_outputs.topk(5)
return(topk[1], topk[0])



def run():
    """Classify the image given on the command line and print the top-k results.

    Runs ResNet-18 on the image (through Glow unless --skip_glow is given)
    and prints one "rank class probability" line per result.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--image", type=str, required=True,
                        help="Location of the image to be classified")
    parser.add_argument("--k", type=int, default=5,
                        help="how many results to show")
    parser.add_argument("--skip_glow", action='store_true', default=False,
                        help="Don't run using Glow")
    # Fixed: help text was a copy-paste of --skip_glow's ("Don't run using Glow").
    parser.add_argument("--print_graph", action='store_true', default=False,
                        help="Print the model graph")
    args = parser.parse_args()

    image = load_image(args.image)
    model = resnet.resnet18(pretrained=True, progress=True)
    model.eval()
    use_glow = not args.skip_glow

    (indices, scores) = run_model(model, image,
                                  use_glow=use_glow,
                                  print_graph=args.print_graph)
    print("rank", "class", "P")
    # Fixed: xrange is Python 2 only; range works on both.
    for i in range(args.k):
        print(i, int(indices[0][i]), float(scores[0][i]))


run()
@@ -54,14 +54,25 @@

# parse known arguments
parser = argparse.ArgumentParser()
parser.add_argument("--run_cmake", action='store_true', default=False,
                    help="Run cmake")
# Fixed: help said "Compile with debug on", the opposite of what this flag does.
parser.add_argument("--release", action='store_true', default=False,
                    help="Compile in Release mode (optimizations on)")
parser.add_argument("--cmake_prefix_path", type=str,
                    help="Populates -DCMAKE_PREFIX_PATH")

# restore first and remaining (unparsed) arguments to argv so that
# setuptools sees only the options it understands
arg_parse_res = parser.parse_known_args()
args = arg_parse_res[0]
sys.argv = [first_arg] + arg_parse_res[1]


# ################################################################################
@@ -72,7 +83,8 @@
@contextmanager
def cd(path):
if not os.path.isabs(path):
raise RuntimeError('Can only cd to absolute path, got: {}'.format(path))
raise RuntimeError(
'Can only cd to absolute path, got: {}'.format(path))
orig_path = os.getcwd()
os.chdir(path)
try:
@@ -93,6 +105,7 @@ class cmake_build(setuptools.Command):
Custom args can be passed to cmake by specifying the `CMAKE_ARGS`
environment variable.
"""

def initialize_options(self):
pass

@@ -106,15 +119,17 @@ def _run_cmake(self):
'-DGLOW_BUILD_PYTORCH_INTEGRATION=ON',
'-DBUILD_SHARED_LIBS=OFF',
'-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
'-DCMAKE_BUILD_TYPE={}'.format('Release' if args.release else 'Debug'),
'-DCMAKE_BUILD_TYPE={}'.format(
'Release' if args.release else 'Debug'),
'-DPYTHON_EXECUTABLE={}'.format(sys.executable),
# PyTorch cmake args
'-DPYTORCH_DIR={}'.format(
os.path.dirname(os.path.realpath(torch.__file__))),
]

if args.cmake_prefix_path:
cmake_args.append('-DCMAKE_PREFIX_PATH={}'.format(args.cmake_prefix_path))
cmake_args.append(
'-DCMAKE_PREFIX_PATH={}'.format(args.cmake_prefix_path))

if 'CMAKE_ARGS' in os.environ:
extra_cmake_args = shlex.split(os.environ['CMAKE_ARGS'])
@@ -158,7 +173,11 @@ def build_extensions(self):
filename = os.path.basename(self.get_ext_filename(fullname))

src = os.path.join(CMAKE_BUILD_DIR, "torch_glow", "src", filename)
dst = os.path.join(os.path.realpath(self.build_lib), 'torch_glow', filename)
dst = os.path.join(
os.path.realpath(
self.build_lib),
'torch_glow',
filename)
print("dst", dst)
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
@@ -5,33 +5,32 @@
from tests.utils import jitVsGlow

# Basic test of the PyTorch adaptive_avg_pool2d Node on Glow.
def test_adaptive_avg_pool2d_basic():
    def adaptive_avg_pool2d_basic(inputs):
        return F.adaptive_avg_pool2d(inputs, (5, 5))

    inputs = torch.randn(3, 6, 14, 14)

    jitVsGlow(adaptive_avg_pool2d_basic, inputs)


# Test of the PyTorch adaptive_avg_pool2d Node with non-square inputs on Glow.
def test_adaptive_avg_pool2d_nonsquare_inputs():
    def adaptive_avg_pool2d_nonsquare_inputs(inputs):
        return F.adaptive_avg_pool2d(inputs, (3, 3))

    inputs = torch.randn(3, 6, 13, 14)

    jitVsGlow(adaptive_avg_pool2d_nonsquare_inputs, inputs)


# Test of the PyTorch adaptive_avg_pool2d Node with non-square outputs on Glow.
def test_adaptive_avg_pool2d_nonsquare_outputs():
    def adaptive_avg_pool2d_nonsquare_outputs(inputs):
        return F.adaptive_avg_pool2d(inputs, (5, 3))

    inputs = torch.randn(3, 6, 14, 14)

    jitVsGlow(adaptive_avg_pool2d_nonsquare_outputs, inputs)
@@ -4,25 +4,28 @@
from tests.utils import jitVsGlow

# Basic test of the PyTorch add Node on Glow.
def test_add_basic():
    def add_basic(a, b):
        c = a.add(b)
        return c.add(c)

    x = torch.randn(4)
    y = torch.randn(4)

    jitVsGlow(add_basic, x, y)

# Test of the PyTorch add_ Node on Glow.
def test_add_inplace():
    def add_inplace(a, b):
        c = a.add_(b)
        return c.add_(c)

    x = torch.randn(4)
    y = torch.randn(4)

    jitVsGlow(add_inplace, x, y)
@@ -5,27 +5,38 @@
from tests.utils import jitVsGlow

# Basic test of the PyTorch batchnorm Node on Glow.


def test_batchnorm_basic():
    def batchnorm_basic(inputs, running_mean, running_var):
        return F.batch_norm(inputs, running_mean, running_var)

    inputs = torch.randn(1, 4, 5, 5)
    running_mean = torch.rand(4)
    running_var = torch.rand(4)

    jitVsGlow(batchnorm_basic, inputs, running_mean, running_var)

# Test of the PyTorch batchnorm Node with weights and biases on Glow.
def test_batchnorm_with_weights():
    def batchnorm_with_weights(
            inputs, weight, bias, running_mean, running_var):
        return F.batch_norm(inputs, running_mean,
                            running_var, weight=weight, bias=bias)

    inputs = torch.randn(1, 4, 5, 5)
    weight = torch.rand(4)
    bias = torch.rand(4)
    running_mean = torch.rand(4)
    running_var = torch.rand(4)

    jitVsGlow(
        batchnorm_with_weights,
        inputs,
        weight,
        bias,
        running_mean,
        running_var)

0 comments on commit 59f26ae

Please sign in to comment.
You can’t perform that action at this time.