Skip to content

Commit

Permalink
Add profiling error info (#91)
Browse files Browse the repository at this point in the history
  • Loading branch information
JiahangXu committed Feb 7, 2023
1 parent ffd51e3 commit bd6bcb2
Show file tree
Hide file tree
Showing 7 changed files with 61 additions and 48 deletions.
10 changes: 6 additions & 4 deletions .github/workflows/integration-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,10 @@ jobs:
steps:
- uses: actions/checkout@v2

- name: Set up Python 3.6
- name: Set up Python 3.7
uses: actions/setup-python@v2
with:
python-version: 3.6.10
python-version: 3.7

- name: Cache
uses: actions/cache@v2
Expand All @@ -32,14 +32,16 @@ jobs:

- name: Install dependencies
run: |
pip install tensorflow==1.15.0
pip install tensorflow==2.6.0
pip install onnx==1.9.0
pip install torch==1.9.0
pip install torchvision==0.10.0
pip install onnx-simplifier
- name: Install nn-Meter
run: pip install -U .
run: |
pip install -U .
pip install protobuf==3.20.3
- name: Integration test
run: python tests/integration_test/test_latency_predictor.py
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/integration-test_nni_based_torch.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,10 @@ jobs:
steps:
- uses: actions/checkout@v2

- name: Set up Python 3.6
- name: Set up Python 3.7
uses: actions/setup-python@v2
with:
python-version: 3.6.10
python-version: 3.7

- name: Cache
uses: actions/cache@v2
Expand Down
8 changes: 5 additions & 3 deletions .github/workflows/integration-test_onnx_based_torch.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,10 @@ jobs:
steps:
- uses: actions/checkout@v2

- name: Set up Python 3.6
- name: Set up Python 3.7
uses: actions/setup-python@v2
with:
python-version: 3.6.10
python-version: 3.7

- name: Cache
uses: actions/cache@v2
Expand All @@ -38,7 +38,9 @@ jobs:
pip install onnx-simplifier==0.3.6
- name: Install nn-Meter
run: pip install -U .
run: |
pip install -U .
pip install protobuf==3.20.3
- name: Integration test
run: python tests/integration_test/test_latency_predictor_torch.py --apply-onnx
Expand Down
7 changes: 4 additions & 3 deletions nn_meter/builder/backend_meta/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,8 @@ def dump_profiled_results(results, detail = False, metrics = ["latency"]):
def read_profiled_results(results):
results_copy = copy.deepcopy(results)
for item in results_copy.values():
for model in item.values():
if 'latency' in model:
model['latency'] = Latency(model['latency'])
if isinstance(item, dict):
for model in item.values():
if 'latency' in model:
model['latency'] = Latency(model['latency'])
return results_copy
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,10 @@ def get_data_by_profiled_results(kernel_type, feature_parser, cfgs_path, labs_pa
except:
pass

if len(features) == 0:
raise ValueError(f"Didn't find any data of {kernel_type} for predictor training. There may be an error in model profiling. Please check the profiling error in " \
"`<workspace>/predictor_build/results/profile_error.log`")

# save features and latency information to `save_path`
if save_path:
import pandas as pd
Expand Down
58 changes: 29 additions & 29 deletions nn_meter/ir_converter/frozenpb_converter/shape_inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ class ShapeInference:
"Relu6",
"Selu",
"LeakyReLU",
"Elu"
"Elu",
"Softmax",

"NoOp"
Expand All @@ -68,7 +68,7 @@ def eval_prodcast(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
input_nodes = node["inbounds"]
Expand Down Expand Up @@ -170,7 +170,7 @@ def Const_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return [], [node["attr"]["attr"]["tensor_shape"]]
Expand All @@ -185,7 +185,7 @@ def Identity_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return [], [graph[node["inbounds"][0]]["attr"]["output_shape"][0]]
Expand All @@ -200,7 +200,7 @@ def Pad_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
in_shape = [graph[node["inbounds"][0]]["attr"]["output_shape"][0]]
Expand All @@ -220,7 +220,7 @@ def PadV2_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return ShapeInference.Pad_get_shape(graph, node)
Expand All @@ -235,7 +235,7 @@ def propagate_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
logging.info("Propagate through op %s.", node["attr"]["name"])
Expand All @@ -251,7 +251,7 @@ def Pool_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
if len(node["inbounds"]) != 1:
Expand Down Expand Up @@ -307,7 +307,7 @@ def AvgPool_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return ShapeInference.Pool_get_shape(graph, node)
Expand All @@ -321,7 +321,7 @@ def AveragePooling2D_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return ShapeInference.Pool_get_shape(graph, node)
Expand All @@ -335,7 +335,7 @@ def MaxPool_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return ShapeInference.Pool_get_shape(graph, node)
Expand All @@ -349,7 +349,7 @@ def MaxPoolV2_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return ShapeInference.Pool_get_shape(graph, node)
Expand All @@ -363,7 +363,7 @@ def MaxPooling2D_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return ShapeInference.Pool_get_shape(graph, node)
Expand All @@ -378,7 +378,7 @@ def Placeholder_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return [], [node["attr"]["attr"]["shape"]]
Expand All @@ -392,7 +392,7 @@ def Conv2D_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
weight_node = ph.find_weights_root(graph, node)
Expand Down Expand Up @@ -475,7 +475,7 @@ def DepthwiseConv2dNative_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
weight_node = ph.find_weights_root(graph, node)
Expand Down Expand Up @@ -559,7 +559,7 @@ def Reduce_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
input_shape = graph[node["inbounds"][0]]["attr"]["output_shape"][0]
Expand Down Expand Up @@ -591,7 +591,7 @@ def Mean_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return ShapeInference.Reduce_get_shape(graph, node)
Expand All @@ -605,7 +605,7 @@ def GlobalAveragePooling2D_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return ShapeInference.Reduce_get_shape(graph, node)
Expand All @@ -619,7 +619,7 @@ def GlobalMaxPooling2D_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return ShapeInference.Reduce_get_shape(graph, node)
Expand All @@ -633,7 +633,7 @@ def MatMul_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
weight_node = ph.find_weights_root(graph, node)
Expand Down Expand Up @@ -694,7 +694,7 @@ def Reshape_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
if "shape" in node["attr"]["attr"].keys():
Expand Down Expand Up @@ -751,7 +751,7 @@ def Concat_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
input_shape = []
Expand Down Expand Up @@ -780,7 +780,7 @@ def Concatenate_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return ShapeInference.Concat_get_shape(graph, node)
Expand All @@ -794,7 +794,7 @@ def ConcatV2_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
return ShapeInference.Concat_get_shape(graph, node)
Expand All @@ -809,7 +809,7 @@ def Split_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
for in_node in node["inbounds"]:
Expand Down Expand Up @@ -839,7 +839,7 @@ def Transpose_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
for in_node in node["inbounds"]:
Expand Down Expand Up @@ -875,7 +875,7 @@ def Pack_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
seq = ph.get_graph_seq(graph, [node["attr"]["name"]])[:5]
Expand All @@ -898,7 +898,7 @@ def StridedSlice_get_shape(graph, node):
----------
graph : dict
The Graph IR in dict format.
node : dict
node : dict
The node in Graph IR in dict format.
"""
seq = ph.get_graph_seq(graph, [node["attr"]["name"]])[:5]
Expand Down
18 changes: 11 additions & 7 deletions tests/integration_test/test_latency_predictor.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,15 +79,19 @@ def integration_test(model_type, url, ppath, output_name = "tests/integration_te

# start testing
for pred_name, pred_version in get_predictors():
try:
since = time.time()
# print(f'nn-meter --{model_type} {ppath} --predictor {pred_name} --predictor-version {pred_version}')
result = subprocess.check_output(['nn-meter', 'predict', f'--{model_type}', f'{ppath}', '--predictor', f'{pred_name}', '--predictor-version', f'{pred_version}'])
runtime = time.time() - since
except NotImplementedError:
logging.error(f"Meets ERROR when checking --{model_type} {ppath} --predictor {pred_name} --predictor-version {pred_version}")
# try:
since = time.time()
# print(f'nn-meter --{model_type} {ppath} --predictor {pred_name} --predictor-version {pred_version}')
result = subprocess.check_output(['nn-meter', 'predict', f'--{model_type}', f'{ppath}', '--predictor', f'{pred_name}', '--predictor-version', f'{pred_version}'])
runtime = time.time() - since
# except NotImplementedError:
# logging.error(f"Meets ERROR when checking --{model_type} {ppath} --predictor {pred_name} --predictor-version {pred_version}")

latency_list = parse_latency_info(result.decode('utf-8'))
print(model_type)
print(latency_list)
print("-----")
os.system("cat tests/integration_test/test_result.txt")
for model, latency in latency_list:
item = f'{model}, {model_type}, {pred_name}, {pred_version}, {round(float(latency), 4)}\n'
# print(item)
Expand Down

0 comments on commit bd6bcb2

Please sign in to comment.