forked from fastai/fastai
-
Notifications
You must be signed in to change notification settings - Fork 1
/
test_utils.py
163 lines (130 loc) · 5.52 KB
/
test_utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/97_test_utils.ipynb (unless otherwise specified).
__all__ = ['synth_dbunch', 'RegModel', 'synth_learner', 'VerboseCallback', 'get_env', 'try_import', 'nvidia_smi',
'nvidia_mem', 'show_install']
# Cell
from .imports import *
from .data.all import *
from .optimizer import *
from .learner import *
from .callback.core import *
from torch.utils.data import TensorDataset
# Cell
from torch.utils.data import TensorDataset
# Cell
def synth_dbunch(a=2, b=3, bs=16, n_train=10, n_valid=2, cuda=False):
    "Create a synthetic `DataLoaders` for the linear regression y = a*x + b (+ gaussian noise)."
    def _make_ds(n_batches):
        # One tensor dataset of `bs*n_batches` samples drawn fresh each call.
        xs = torch.randn(bs*n_batches, 1)
        ys = a*xs + b + 0.1*torch.randn(bs*n_batches, 1)
        return TensorDataset(xs, ys)
    dev = default_device() if cuda else None
    # Training loader shuffles; validation loader keeps order. num_workers=0 keeps tests deterministic.
    dls = [TfmdDL(_make_ds(n_train), bs=bs, shuffle=True, num_workers=0),
           TfmdDL(_make_ds(n_valid), bs=bs, num_workers=0)]
    return DataLoaders(*dls, device=dev)
# Cell
class RegModel(Module):
    "Trainable scalar linear model computing `y = a*x + b`."
    def __init__(self):
        # Two learnable scalars, initialized from a standard normal.
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))
    def forward(self, x):
        return x*self.a + self.b
# Cell
@delegates(Learner.__init__)
def synth_learner(n_trn=10, n_val=2, cuda=False, lr=1e-3, data=None, model=None, **kwargs):
    "Build a `Learner` over synthetic regression data, defaulting `data` and `model` when omitted."
    if data is None:
        data = synth_dbunch(n_train=n_trn, n_valid=n_val, cuda=cuda)
    if model is None:
        model = RegModel()
    # MSE loss + SGD with momentum are fixed: this learner exists only to exercise training machinery in tests.
    return Learner(data, model, lr=lr, loss_func=MSELossFlat(), opt_func=partial(SGD, mom=0.9), **kwargs)
# Cell
class VerboseCallback(Callback):
    "Callback that prints the name of each event called"
    def __call__(self, event_name):
        # Echo the event name before normal dispatch so test output shows the exact call sequence.
        print(event_name)
        super().__call__(event_name)
# Cell
def get_env(name):
    "Return env var value if it's defined and not an empty string, or return Unknown"
    # An empty string is treated the same as "not set".
    return os.environ.get(name, '') or "Unknown"
# Cell
def try_import(module):
    "Try to import `module`. Returns module's object on success, None on failure"
    try:
        return importlib.import_module(module)
    # Only a failed import should map to None. The previous bare `except:` also
    # swallowed SystemExit/KeyboardInterrupt and hid unrelated errors raised at
    # import time; ImportError covers the "module not installed" case callers rely on.
    except ImportError:
        return None
# Cell
def nvidia_smi(cmd = "nvidia-smi"):
    "Run `cmd` (default `nvidia-smi`) and return its output, or None when it can't be executed."
    # `run` raises OSError when the executable is missing/unrunnable; treat that
    # as "no NVIDIA tooling on this machine". (The old code bound the exception
    # to an unused name `e`.)
    try: res = run(cmd)
    except OSError: return None
    return res
# Cell
def nvidia_mem():
    "Return a list of per-GPU total-memory strings (MB) from nvidia-smi, or None on any failure."
    # Best-effort probe: any failure (missing binary, non-zero exit, bad output)
    # means "no info". Use `except Exception` rather than a bare `except:` so
    # SystemExit/KeyboardInterrupt still propagate.
    try: mem = run("nvidia-smi --query-gpu=memory.total --format=csv,nounits,noheader")
    except Exception: return None
    return mem.strip().split('\n')
# Cell
def show_install(show_nvidia_smi:bool=False):
    "Print a markdown-formatted report of the user's software/hardware setup (versions, CUDA, GPUs, platform); pass `show_nvidia_smi=True` to append raw nvidia-smi output."
    import fastai, platform, fastprogress
    # `rep` accumulates [label, value] rows; value None marks a section header / bare message.
    rep = []
    # Optional modules we suggest installing at the end of the report.
    opt_mods = []
    rep.append(["=== Software ===", None])
    rep.append(["python", platform.python_version()])
    rep.append(["fastai", fastai.__version__])
    rep.append(["fastprogress", fastprogress.__version__])
    rep.append(["torch", torch.__version__])
    # nvidia-smi
    smi = nvidia_smi()
    if smi:
        # Pull the driver version (e.g. "Driver Version: 535.54") out of the raw output.
        match = re.findall(r'Driver Version: +(\d+\.\d+)', smi)
        if match: rep.append(["nvidia driver", match[0]])
    available = "available" if torch.cuda.is_available() else "**Not available** "
    rep.append(["torch cuda", f"{torch.version.cuda} / is {available}"])
    # no point reporting on cudnn if cuda is not available, as it
    # seems to be enabled at times even on cpu-only setups
    if torch.cuda.is_available():
        enabled = "enabled" if torch.backends.cudnn.enabled else "**Not enabled** "
        rep.append(["torch cudnn", f"{torch.backends.cudnn.version()} / is {enabled}"])
    rep.append(["\n=== Hardware ===", None])
    # NOTE(review): gpu_total_mem is initialized but never filled from `nvidia_mem()`,
    # so the per-GPU "NNNNMB | " prefix below is dead code — confirm against upstream,
    # which assigned the queried values into this list.
    gpu_total_mem = []
    nvidia_gpu_cnt = 0
    if smi:
        mem = nvidia_mem()
        # nvidia_mem() returns one line per GPU (or None); count them as the GPU count.
        nvidia_gpu_cnt = len(ifnone(mem, []))
    if nvidia_gpu_cnt: rep.append(["nvidia gpus", nvidia_gpu_cnt])
    torch_gpu_cnt = torch.cuda.device_count()
    if torch_gpu_cnt:
        rep.append(["torch devices", torch_gpu_cnt])
        # information for each gpu
        for i in range(torch_gpu_cnt):
            rep.append([f" - gpu{i}", (f"{gpu_total_mem[i]}MB | " if gpu_total_mem else "") + torch.cuda.get_device_name(i)])
    else:
        # nvidia-smi sees GPUs but torch doesn't: most likely a driver/toolkit mismatch.
        if nvidia_gpu_cnt:
            rep.append([f"Have {nvidia_gpu_cnt} GPU(s), but torch can't use them (check nvidia driver)", None])
        else:
            rep.append([f"No GPUs available", None])
    rep.append(["\n=== Environment ===", None])
    rep.append(["platform", platform.platform()])
    if platform.system() == 'Linux':
        # `distro` is an optional third-party module giving richer distribution info.
        distro = try_import('distro')
        if distro:
            # full distro info
            rep.append(["distro", ' '.join(distro.linux_distribution())])
        else:
            opt_mods.append('distro');
            # partial distro info
            rep.append(["distro", platform.uname().version])
    rep.append(["conda env", get_env('CONDA_DEFAULT_ENV')])
    rep.append(["python", sys.executable])
    rep.append(["sys.path", "\n".join(sys.path)])
    # Emit the report inside a ```text fence so it pastes cleanly into forums/GitHub.
    print("\n\n```text")
    # Pad labels to the widest one so values line up in a column.
    keylen = max([len(e[0]) for e in rep if e[1] is not None])
    for e in rep:
        print(f"{e[0]:{keylen}}", (f": {e[1]}" if e[1] is not None else ""))
    if smi:
        if show_nvidia_smi: print(f"\n{smi}")
    else:
        # torch sees GPUs but nvidia-smi is absent vs. no GPUs at all.
        if torch_gpu_cnt: print("no nvidia-smi is found")
        else: print("no supported gpus found on this system")
    print("```\n")
    print("Please make sure to include opening/closing ``` when you paste into forums/github to make the reports appear formatted as code sections.\n")
    if opt_mods:
        print("Optional package(s) to enhance the diagnostics can be installed with:")
        print(f"pip install {' '.join(opt_mods)}")
        print("Once installed, re-run this utility to get the additional information")