/
gpu_utils.py
54 lines (35 loc) · 1.4 KB
/
gpu_utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
def parallel_gpu_jobs(allow_growth=True, fraction=.5):
    '''Sets the max used memory as a fraction for tensorflow
    backend

    allow_growth :: True or False
    fraction :: a float value (e.g. 0.5 means 4gb out of 8gb)

    NOTE: must be called before any TensorFlow session is created,
    otherwise the settings have no effect.
    '''

    # imports are local so importing this module does not require
    # keras/tensorflow to be installed
    import keras.backend as K
    import tensorflow as tf

    # use the directly-imported `tf` module consistently; the original
    # mixed `K.tf.*` (removed in newer keras versions) with `tf.*`
    gpu_options = tf.GPUOptions(allow_growth=allow_growth,
                                per_process_gpu_memory_fraction=fraction)
    config = tf.ConfigProto(gpu_options=gpu_options)
    session = tf.Session(config=config)

    # register the configured session with the keras backend
    K.set_session(session)
def multi_gpu(model, gpus=None, cpu_merge=True, cpu_relocation=False):
    '''Takes as input the model, and returns a model
    based on the number of GPUs available on the machine
    or alternatively the 'gpus' user input.

    NOTE: this needs to be used before model.compile() in the
    model inputted to Scan in the form:

    from talos.utils.gpu_utils import multi_gpu
    model = multi_gpu(model)
    '''

    # deferred import keeps keras optional at module import time
    from keras.utils import multi_gpu_model

    parallel_model = multi_gpu_model(model,
                                     gpus=gpus,
                                     cpu_merge=cpu_merge,
                                     cpu_relocation=cpu_relocation)

    return parallel_model
def force_cpu():
    '''Force CPU on a GPU system
    '''

    # local imports so the module loads without keras/tensorflow installed
    import keras.backend as K
    import tensorflow as tf

    # hide all GPU devices from TensorFlow, then hand the
    # CPU-only session to the keras backend
    cpu_only_config = tf.ConfigProto(device_count={'GPU': 0})
    K.set_session(tf.Session(config=cpu_only_config))