-
Notifications
You must be signed in to change notification settings - Fork 0
/
run_precompiled.py
179 lines (149 loc) · 5.24 KB
/
run_precompiled.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
from numpy.core.numeric import full
import tvm
from tvm.contrib import utils, graph_executor as runtime
from tvm.relay.op.nn.nn import conv2d, dilate
#####
import numpy as np
import os
import time
import platform
import sys
#from math import prod
import json
import pickle
### pip install func-timeout
from func_timeout import func_timeout, FunctionTimedOut
#####
from profile_config import *
# --- Resume-state setup -----------------------------------------------------
# Enumerate precompiled module directories and restore the set of options
# that were already profiled in a previous (possibly interrupted) run.
print("exploring precompiled modules from ", module_path+"/")
print()
module_options = set(os.listdir(module_path))
print(len(module_options), "found")
print()
# Suffix the state paths so precompiled-module runs keep separate state
# from other profiling modes.
full_state_path = full_state_path + "_" + "precomp"
state_file += "_" + "precomp"
if os.path.exists(full_state_path):
    # NOTE(review): pickle is only safe here because we wrote this file
    # ourselves — never point full_state_path at untrusted data.
    with open(full_state_path, "rb") as f:
        state = pickle.load(f)
    print("found state")
    print(len(state["done"]), "already profiled")
else:
    # First run: start an empty "done" set and persist it immediately.
    state = {"done": set()}
    with open(full_state_path, "wb") as f:
        pickle.dump(state, f)
# Only profile options not yet marked done.
module_options = module_options.difference(state["done"])
# --- Main profiling loop ----------------------------------------------------
# For each precompiled module directory: load its artifacts, run it under the
# TVM debug executor to get per-node times, optionally measure power, and
# write the augmented config JSON to data_path.
for option in module_options:
    # Mark the option done and persist BEFORE running it, so a crash while
    # profiling this option does not wedge the resume loop on it forever.
    state["done"].add(option)
    with open(full_state_path, "wb") as f:
        pickle.dump(state, f)
    full_path = module_path + "/" + option + "/"
    print(full_path)
    files = os.listdir(full_path)
    # Per-option artifacts, filled in from the directory contents below.
    inp_shape = None
    inp_data = None
    params = None
    graph_json = None
    module = None
    # NOTE(review): layer_name is never assigned after this line in the
    # visible code, so the name-match below is guarded against None to
    # avoid a TypeError — confirm where layer_name was meant to come from
    # (profile_config wildcard import?).
    layer_name = None
    description_vecs = {}
    metas = {}
    memory_data = {}
    config = None
    conf_str = None
    # Skip directories that are missing any required artifact.
    if "data.params" not in files:
        print(full_path, "lacks data.params file")
        continue
    if "compressed_input.data" not in files:
        print(full_path, "lacks input tensor data")
        continue
    if "config.json" not in files:
        print(full_path, "lacks config.json file")
        continue
    if "graph.json" not in files:
        print(full_path, "lacks graph.json file")
        continue
    # BUG FIX: the original re-tested "graph.json" here, so a missing
    # lib.tar was never detected and module stayed None.
    if "lib.tar" not in files:
        print(full_path, "lacks lib.tar file")
        continue
    for file in files:
        if file == "data.params":
            with open(full_path + file, "rb") as f:
                params = pickle.load(f)
            print("found params")
            continue
        if file == "compressed_input.data":
            with open(full_path + file, "rb") as f:
                raw_input_data, inp_shape = pickle.load(f)
            print("found compressed input data")
            print("expanding inputs")
            # Tile the compressed sample up to the full input size, then
            # trim/reshape to the recorded input shape.
            required = int(np.prod(inp_shape))
            rand_repeat = int(np.prod(inp_shape) / np.prod(raw_input_data.shape))
            inp_data = np.repeat(raw_input_data, rand_repeat)[:required].reshape(inp_shape).astype("float32")
            del raw_input_data  # free the compressed sample early
            continue
        if file == "config.json":
            # BUG FIX: the original called json.loads on the open file
            # object (TypeError); json.load parses the file stream.
            with open(full_path + file, "r") as f:
                config = json.load(f)
            print("found config")
            continue
        if file == "graph.json":
            with open(full_path + file, "r") as f:
                graph_json = f.read()
            print("found graph JSON")
            continue
        if file == "lib.tar":
            module = tvm.runtime.load_module(full_path + file)
            print("found module")
            continue
    print("LOADING MODULE COMPLETE")
    from tvm.contrib.debugger import debug_executor as graph_runtime
    debug_g_mod = graph_runtime.GraphModuleDebug(
        module["debug_create"]("default", dev),
        [dev],
        graph_json,
        ".",
    )
    try:
        debug_g_mod.set_input("data", tvm.nd.array(inp_data.astype("float32")))
        #func_timeout(execution_timeout, debug_g_mod.run, None, None)
        raw_times = debug_g_mod.run_individual(number=3, repeat=3, min_repeat_ms=300)
    except Exception as e:  # narrowed from bare except: keep Ctrl-C working
        print("[WARNING] EXECUTION FAILED:", e)
        continue
    print("[INFO] EXECUTION COMPLETED")
    # Map per-node times to op names and pick out the layer of interest.
    times = {}
    layer_time = None
    actual_func_name = None
    for idx, node in enumerate(debug_g_mod.debug_datum._nodes_list):
        times[node["op"]] = raw_times[idx]
        if layer_name is not None and layer_name in node["op"]:
            layer_time = raw_times[idx]
            actual_func_name = node["op"]
    if layer_time is None:
        # Robustness: the original crashed here (TypeError/NameError) when
        # no node matched layer_name; skip the option instead.
        print("[WARNING] target layer not found in graph nodes")
        continue
    print(layer_time)
    config["time"] = layer_time
    print("[INFO] EXTRACTED TIME INFORMATION")
    if data_collector is not None:
        # Measure power consumption; repeat enough runs to span the
        # collector's minimum time resolution.
        runs = max(1000, int(time_min_res / layer_time))
        print(runs, "repetitions required")
        try:
            debug_g_mod.set_input("data", tvm.nd.array(inp_data.astype("float32")))
            print("input prepared")
            test_data = debug_g_mod.profile(collectors=[data_collector], data=tvm.nd.array(inp_data), runs=runs)
            print("profiling done")
        except Exception as e:
            print("[WARNING] PROFILING FAILED:", e)
            continue
        print("[INFO] PROFILING COMPLETED")
        # store everything
        powers = {}
        for idx, data in enumerate(test_data.calls):
            print(idx)
            print(data)
            powers[data["Name"]] = profiling.evaluate_power(dict(data))
            print()
    else:
        # No power collector: report -1 (no measurement) per layer.
        # NOTE(review): description_vecs is always empty in the visible
        # code, so powers stays empty here — confirm its intended source.
        powers = {}
        for layer in description_vecs.keys():
            powers[layer] = -1
        print()
    # Robustness: default to -1 when the matched layer has no power entry
    # (guaranteed in the no-collector branch above) instead of KeyError.
    config["power"] = powers.get(actual_func_name, -1)
    ## store result
    with open(data_path + "/" + target + "_" + option, "w") as f:
        f.write(json.dumps(config))
    #serializer.dump_description_vectors(description_vecs, metas, conf_str, device+"_"+target, "serialized"+"_"+layer_name, path="./dataset")
    print("RUN COMPLETED")