Skip to content

Commit

Permalink
style: clean up remaining flake8 errors
Browse files Browse the repository at this point in the history
  • Loading branch information
tgamblin committed Aug 5, 2019
1 parent 643df1c commit 412b086
Show file tree
Hide file tree
Showing 18 changed files with 77 additions and 79 deletions.
18 changes: 9 additions & 9 deletions docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,14 @@

# -- Project information -----------------------------------------------------

project = u"hatchet"
copyright = u"2017-2019, Lawrence Livermore National Security, LLC"
author = u"Abhinav Bhatele"
project = "hatchet"
copyright = "2017-2019, Lawrence Livermore National Security, LLC"
author = "Abhinav Bhatele"

# The short X.Y version
version = u""
version = ""
# The full version, including alpha/beta/rc tags
release = u""
release = ""


# -- General configuration ---------------------------------------------------
Expand Down Expand Up @@ -81,7 +81,7 @@
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u"_build", "Thumbs.db", ".DS_Store"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
Expand Down Expand Up @@ -159,15 +159,15 @@
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "hatchet.tex", u"hatchet Documentation", u"Abhinav Bhatele", "manual")
(master_doc, "hatchet.tex", "hatchet Documentation", "Abhinav Bhatele", "manual")
]


# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "hatchet", u"hatchet Documentation", [author], 1)]
man_pages = [(master_doc, "hatchet", "hatchet Documentation", [author], 1)]


# -- Options for Texinfo output ----------------------------------------------
Expand All @@ -179,7 +179,7 @@
(
master_doc,
"hatchet",
u"hatchet Documentation",
"hatchet Documentation",
author,
"hatchet",
"One line description of project.",
Expand Down
5 changes: 2 additions & 3 deletions examples/caliper.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,8 @@
##############################################################################

from __future__ import print_function
from hatchet import *
import sys
import pandas as pd
import hatchet as ht

pd.set_option("display.width", 1500)
pd.set_option("display.max_colwidth", 20)
Expand All @@ -26,7 +25,7 @@
"hatchet/tests/data/caliper-lulesh-json/lulesh-sample-annotation-profile.json"
)

gf = GraphFrame()
gf = ht.GraphFrame()
gf.from_caliper(filename)

print(gf.dataframe)
Expand Down
6 changes: 3 additions & 3 deletions examples/dag_literal.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,16 +12,16 @@
##############################################################################

from __future__ import print_function
from hatchet import *
import sys
import pandas as pd
import hatchet as ht


pd.set_option("display.width", 500)
pd.set_option("display.max_colwidth", 30)


if __name__ == "__main__":
gf = GraphFrame()
gf = ht.GraphFrame()
gf.from_literal(
[
{
Expand Down
5 changes: 2 additions & 3 deletions examples/gprof_dot.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,16 +12,15 @@
##############################################################################

from __future__ import print_function
from hatchet import *
import sys
import pandas as pd
import hatchet as ht

pd.set_option("display.width", 500)
pd.set_option("display.max_colwidth", 30)


if __name__ == "__main__":
gf = GraphFrame()
gf = ht.GraphFrame()
gf.from_gprof_dot("hatchet/tests/data/gprof2dot-cpi/callgrind.dot.64042.0.1")

print(gf.dataframe)
Expand Down
5 changes: 2 additions & 3 deletions examples/hpctoolkit.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,8 @@
##############################################################################

from __future__ import print_function
from hatchet import *
import sys
import pandas as pd
import hatchet as ht

pd.set_option("display.width", 500)
pd.set_option("display.max_colwidth", 30)
Expand All @@ -23,7 +22,7 @@
if __name__ == "__main__":
dirname = "hatchet/tests/data/hpctoolkit-cpi-database"

gf = GraphFrame()
gf = ht.GraphFrame()
gf.from_hpctoolkit(dirname)

print(gf.dataframe.xs(0, level="rank"))
Expand Down
5 changes: 2 additions & 3 deletions examples/tree_literal.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,16 +12,15 @@
##############################################################################

from __future__ import print_function
from hatchet import *
import sys
import pandas as pd
import hatchet as ht

pd.set_option("display.width", 500)
pd.set_option("display.max_colwidth", 30)


if __name__ == "__main__":
gf = GraphFrame()
gf = ht.GraphFrame()
gf.from_literal(
[
{
Expand Down
8 changes: 7 additions & 1 deletion hatchet/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,4 +10,10 @@
# Please also read the LICENSE file for the MIT License notice.
##############################################################################

from .graphframe import *
# make flake8 ignore unused names in this file.
# flake8: noqa: F401

from .graphframe import GraphFrame
from .hpctoolkit_reader import HPCToolkitReader
from .caliper_reader import CaliperReader
from .gprof_dot_reader import GprofDotReader
4 changes: 2 additions & 2 deletions hatchet/caliper_reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,14 +137,14 @@ def create_graphframe(self):
self.json_cols[idx]
].apply(
lambda x: re.match(
"(.*):(\d+)", self.json_nodes[x]["label"]
r"(.*):(\d+)", self.json_nodes[x]["label"]
).group(1)
)
self.df_json_data["line"] = self.df_json_data[
self.json_cols[idx]
].apply(
lambda x: re.match(
"(.*):(\d+)", self.json_nodes[x]["label"]
r"(.*):(\d+)", self.json_nodes[x]["label"]
).group(2)
)
self.df_json_data.drop(self.json_cols[idx], axis=1, inplace=True)
Expand Down
16 changes: 8 additions & 8 deletions hatchet/external/printtree.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,8 +73,8 @@ def as_text(
rank,
threshold,
expand_names,
indent=u"",
child_indent=u"",
indent="",
child_indent="",
unicode=False,
color=False,
):
Expand Down Expand Up @@ -107,15 +107,15 @@ def as_text(

# add context (filename etc.) if requested
if context in dataframe.columns:
result = u"{indent}{time_str} {function} {c.faint}{code_position}{c.end}\n".format(
result = "{indent}{time_str} {function} {c.faint}{code_position}{c.end}\n".format(
indent=indent,
time_str=time_str,
function=func_name,
code_position=dataframe.loc[df_index, context],
c=colors_enabled if color else colors_disabled,
)
else:
result = u"{indent}{time_str} {function}\n".format(
result = "{indent}{time_str} {function}\n".format(
indent=indent, time_str=time_str, function=func_name
)

Expand All @@ -136,11 +136,11 @@ def as_text(

for child in children:
if child is not last_child:
c_indent = child_indent + (u"├─ " if unicode else "|- ")
cc_indent = child_indent + (u"│ " if unicode else "| ")
c_indent = child_indent + ("├─ " if unicode else "|- ")
cc_indent = child_indent + ("│ " if unicode else "| ")
else:
c_indent = child_indent + (u"└─ " if unicode else "`- ")
cc_indent = child_indent + u" "
c_indent = child_indent + ("└─ " if unicode else "`- ")
cc_indent = child_indent + " "
result += as_text(
child,
dataframe,
Expand Down
1 change: 0 additions & 1 deletion hatchet/frame.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
# For details, see: https://github.com/LLNL/hatchet
# Please also read the LICENSE file for the MIT License notice.
##############################################################################
import sys

from functools import total_ordering

Expand Down
2 changes: 1 addition & 1 deletion hatchet/gprof_dot_reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
from .graph import Graph
from .frame import Frame
from .util.timer import Timer
from .util.config import *
from .util.config import dot_keywords


class GprofDotReader:
Expand Down
12 changes: 4 additions & 8 deletions hatchet/graphframe.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,12 +97,10 @@ def parse_node_literal(child_dict, hparent):
for i in range(len(graph_dict)):
graph_root = Node(Frame({"name": graph_dict[i]["name"]}), None)

node_dicts.append(
dict(
{"node": graph_root, "name": graph_dict[i]["name"]},
**graph_dict[i]["metrics"]
)
)
node_dict = {"node": graph_root, "name": graph_dict[i]["name"]}
node_dict.update(**graph_dict[i]["metrics"])
node_dicts.append(node_dict)

list_roots.append(graph_root)

# call recursively on all children of root
Expand Down Expand Up @@ -196,7 +194,6 @@ def squash(self):
filtered_nodes = self.dataframe.index

node_clone = {}
old_to_new_id = {}

# function to connect a node to the nearest descendants that are in the
# list of filtered nodes
Expand Down Expand Up @@ -224,7 +221,6 @@ def rewire_tree(node, clone, is_root, roots):
node_label = new_child.frame
if node_label not in label_to_new_child.keys():
new_child_clone = Node(new_child.frame, clone)
idx = squ_idx
squ_idx += 1
clone.add_child(new_child_clone)
label_to_new_child[node_label] = new_child_clone
Expand Down
6 changes: 3 additions & 3 deletions hatchet/hpctoolkit_reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def read_metricdb_file(args):
""" Read a single metricdb file into a 1D array """
filename, num_nodes, num_metrics, shape = args
rank = int(
re.search("\-(\d+)\-(\d+)\-([\w\d]+)\-(\d+)\-\d.metric-db$", filename).group(1)
re.search(r"\-(\d+)\-(\d+)\-([\w\d]+)\-(\d+)\-\d.metric-db$", filename).group(1)
)

with open(filename, "rb") as metricdb:
Expand Down Expand Up @@ -84,8 +84,8 @@ def __init__(self, dir_name):
# Read one metric-db file to extract the number of nodes in the CCT
# and the number of metrics
with open(metricdb_files[0], "rb") as metricdb:
tag = metricdb.read(18)
version = metricdb.read(5)
metricdb.read(18) # skip tag
metricdb.read(5) # skip version TODO: should we?
endian = metricdb.read(1)

if endian == b"b":
Expand Down
3 changes: 0 additions & 3 deletions hatchet/tests/callgrind.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,6 @@
# Please also read the LICENSE file for the MIT License notice.
##############################################################################

import sys
import pytest

from hatchet import GraphFrame, GprofDotReader

roots = [
Expand Down
4 changes: 2 additions & 2 deletions hatchet/util/timer.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,9 +47,9 @@ def end_phase(self):

def __str__(self):
out = StringIO()
out.write(u"Times:\n")
out.write("Times:\n")
for phase, delta in self._times.items():
out.write(u" %-20s %.2fs\n" % (phase + ":", delta.total_seconds()))
out.write(" %-20s %.2fs\n" % (phase + ":", delta.total_seconds()))
return out.getvalue()

@contextmanager
Expand Down
18 changes: 10 additions & 8 deletions scripts/metricdb_reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,16 @@
# Please also read the LICENSE file for the MIT License notice.
##############################################################################

from __future__ import print_function

import glob
import struct
import numpy as np

# np.set_printoptions(threshold=np.inf)

# Read all .metric-db files in the current directory
mdbfiles = glob.glob('*.metric-db')
mdbfiles = glob.glob("*.metric-db")
num_pes = len(mdbfiles)

# Read header from one of the .metric-db files
Expand All @@ -28,15 +31,15 @@
endian = metricdb.read(1)

# Big endian
if endian == 'b':
num_nodes = struct.unpack('>i', metricdb.read(4))[0]
num_metrics = struct.unpack('>i', metricdb.read(4))[0]
if endian == "b":
num_nodes = struct.unpack(">i", metricdb.read(4))[0]
num_metrics = struct.unpack(">i", metricdb.read(4))[0]
# TODO: complete for little endian

metricdb.close()

print "Tag: %s Version: %s Endian: %s" % (tag, version, endian)
print "Files: %d Nodes: %d Metrics: %d" % (num_pes, num_nodes, num_metrics)
print("Tag: %s Version: %s Endian: %s" % (tag, version, endian))
print("Files: %d Nodes: %d Metrics: %d" % (num_pes, num_nodes, num_metrics))

# Create a single metrics array
metrics = np.empty([num_pes, num_nodes, num_metrics])
Expand All @@ -47,8 +50,7 @@
metricdb.seek(32)
# currently assumes a big endian binary and reads all the metrics at once
# into a numpy array
arr = np.fromfile(metricdb, dtype=np.dtype('>f8'),
count = num_nodes * num_metrics)
arr = np.fromfile(metricdb, dtype=np.dtype(">f8"), count=num_nodes * num_metrics)
metrics[index] = arr.reshape(num_nodes, num_metrics)
# alternate method of reading the file one metric at a time
# for i in range(0, num_nodes):
Expand Down
Loading

0 comments on commit 412b086

Please sign in to comment.